[jit] Make operation call accept Stack& instead of Stack* (#63414)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63414
This fixes a misuse of a raw pointer: the stack argument is never nullable, so the operation call should take a Stack& rather than a Stack*.
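For illustration only (the schema and lambdas below are hypothetical, not taken from this diff), a registration changes shape like this:

    // Before: the lambda takes Stack*, even though the stack is never null.
    Operator(
        "example::noop(Tensor a) -> Tensor",
        [](Stack* stack) { /* pop/push through the pointer */ },
        aliasAnalysisFromSchema());

    // After: the reference makes the non-null contract explicit.
    Operator(
        "example::noop(Tensor a) -> Tensor",
        [](Stack& stack) { /* pop/push through the reference */ },
        aliasAnalysisFromSchema());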
ghstack-source-id: 136938318
Test Plan:
compiles.
Imported from OSS
Reviewed By: ejguan
Differential Revision: D30375410
fbshipit-source-id: 9d65b620bb76d90d886c800f54308520095d58ee
diff --git a/aten/src/ATen/core/dispatch/Dispatcher.h b/aten/src/ATen/core/dispatch/Dispatcher.h
index fd32a72..cfa6b74 100644
--- a/aten/src/ATen/core/dispatch/Dispatcher.h
+++ b/aten/src/ATen/core/dispatch/Dispatcher.h
@@ -344,6 +344,10 @@
c10::Dispatcher::singleton().callBoxed(*this, stack);
}
+ void callBoxed(Stack& stack) const {
+ callBoxed(&stack);
+ }
+
void redispatchBoxed(DispatchKeySet ks, Stack* stack) const {
c10::Dispatcher::singleton().redispatchBoxed(*this, ks, stack);
}
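The new overload simply forwards to the existing Stack* version, so both spellings compile during the migration. A minimal usage sketch (assuming `op` is a c10::OperatorHandle obtained elsewhere):

    torch::jit::Stack stack;
    torch::jit::push(stack, at::ones(5));
    op.callBoxed(stack);   // new: pass the stack by reference
    op.callBoxed(&stack);  // old: pointer form still works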
diff --git a/aten/src/ATen/core/stack.h b/aten/src/ATen/core/stack.h
index ffc0e8f..021e8a0 100644
--- a/aten/src/ATen/core/stack.h
+++ b/aten/src/ATen/core/stack.h
@@ -1,6 +1,9 @@
#pragma once
+#include <type_traits>
+
#include <ATen/core/ivalue.h>
+#include <c10/util/Deprecated.h>
// TODO move this to c10 namespace
@@ -9,7 +12,42 @@
using c10::IValue;
using Stack = std::vector<IValue>;
-using Operation = std::function<void(Stack*)>;
+
+class Operation {
+ template <typename F, typename Arg>
+ using accepts = std::is_constructible<std::function<void(Arg)>, F&&>;
+
+ public:
+ template <typename F,
+ std::enable_if_t<accepts<F, Stack*>::value, int> = 0>
+ C10_DEPRECATED_MESSAGE("Please use void(Stack&) to register an operator instead.")
+ Operation(F&& raw): op_([raw = std::forward<F>(raw)](Stack& stack) {
+ raw(&stack);
+ }) {}
+
+ template <typename F,
+ std::enable_if_t<accepts<F, Stack&>::value &&
+ !std::is_same<std::decay_t<F>, Operation>::value, int> = 0>
+ Operation(F&& op): op_(std::forward<F>(op)) {}
+
+ Operation(std::nullptr_t) noexcept {}
+
+ explicit operator bool() const noexcept {
+ return op_ ? true : false;
+ }
+
+ void operator()(Stack& stack) {
+ op_(stack);
+ }
+
+ template <typename T>
+ T* target() noexcept {
+ return op_.target<T>();
+ }
+
+ private:
+ std::function<void(Stack&)> op_;
+};
// An operation with N inputs and M outputs pops the last N inputs off
// the stack and pushes its M outputs onto the stack
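The wrapper keeps both registration styles working: a Stack& callable is stored directly, while a legacy Stack* callable goes through the deprecated constructor, which adapts it by passing &stack. A minimal sketch (hypothetical lambdas, for illustration):

    using torch::jit::Operation;
    using torch::jit::Stack;

    Operation modern([](Stack& stack) { /* ... */ });  // stored as-is
    Operation legacy([](Stack* stack) { /* ... */ });  // compiles, but warns as deprecated

    Stack stack;
    modern(stack);  // Operation::operator() now takes Stack&
    legacy(stack);  // the adapter forwards &stack to the old lambda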
diff --git a/test/cpp/jit/test_alias_analysis.cpp b/test/cpp/jit/test_alias_analysis.cpp
index 1bd556a..eef529d 100644
--- a/test/cpp/jit/test_alias_analysis.cpp
+++ b/test/cpp/jit/test_alias_analysis.cpp
@@ -1,11 +1,11 @@
#include <gtest/gtest.h>
#include <torch/csrc/autograd/generated/variable_factories.h>
+#include <torch/csrc/jit/frontend/ir_emitter.h>
+#include <torch/csrc/jit/ir/alias_analysis.h>
#include <torch/csrc/jit/ir/irparser.h>
-#include "torch/csrc/jit/frontend/ir_emitter.h"
-#include "torch/csrc/jit/ir/alias_analysis.h"
-#include "torch/csrc/jit/runtime/custom_operator.h"
-#include "torch/csrc/utils/memory.h"
+#include <torch/csrc/jit/runtime/custom_operator.h>
+#include <torch/csrc/utils/memory.h>
namespace torch {
namespace jit {
@@ -484,7 +484,7 @@
TEST(WriteTrackingTest, Basic) {
RegisterOperators reg({Operator(
"prim::creates_alias(Tensor(a) x) -> Tensor(a)",
- [](Stack* s) {},
+ [](Stack&) {},
aliasAnalysisFromSchema())});
const auto creates_alias = Symbol::fromQualString("prim::creates_alias");
auto graph = std::make_shared<Graph>();
@@ -949,11 +949,11 @@
RegisterOperators reg(
{Operator(
"prim::returns_wildcard(Tensor a) -> Tensor(*)",
- [](Stack* stack) {},
+ [](Stack&) {},
aliasAnalysisFromSchema()),
Operator(
"prim::writes(Tensor(z!) a) -> Tensor(a)",
- [](Stack* stack) {},
+ [](Stack&) {},
aliasAnalysisFromSchema())});
const auto returns_wildcard =
Symbol::fromQualString("prim::returns_wildcard");
diff --git a/test/cpp/jit/test_custom_operators.cpp b/test/cpp/jit/test_custom_operators.cpp
index a34ca33..39be82e 100644
--- a/test/cpp/jit/test_custom_operators.cpp
+++ b/test/cpp/jit/test_custom_operators.cpp
@@ -31,7 +31,7 @@
Stack stack;
push(stack, 2.0f, at::ones(5));
- op->getOperation()(&stack);
+ op->getOperation()(stack);
at::Tensor output;
pop(stack, output);
@@ -61,7 +61,7 @@
Stack stack;
push(stack, 2.0f, at::ones(5));
- op->getOperation()(&stack);
+ op->getOperation()(stack);
at::Tensor output;
pop(stack, output);
@@ -109,7 +109,7 @@
c10::List<c10::complex<double>>(
{c10::complex<double>(2.4, -5.5), c10::complex<double>(-1.3, 2)}));
push(stack, c10::List<at::Tensor>({at::ones(5)}));
- op->getOperation()(&stack);
+ op->getOperation()(stack);
c10::List<double> output;
pop(stack, output);
@@ -140,7 +140,7 @@
Stack stack;
push(stack, c10::List<at::Tensor>({at::ones(5)}));
- op->getOperation()(&stack);
+ op->getOperation()(stack);
c10::List<at::Tensor> output;
pop(stack, output);
@@ -204,7 +204,7 @@
torch::jit::RegisterOperators reg({OperatorGenerator(
TORCH_SELECTIVE_NAME_IN_SCHEMA(
op_list, "foofoo::not_exist(float a, Tensor b) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
at::Tensor b;
@@ -223,7 +223,7 @@
torch::jit::RegisterOperators reg({OperatorGenerator(
TORCH_SELECTIVE_NAME_IN_SCHEMA(
op_list, "foofoo::bar.template(float a, Tensor b) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
at::Tensor b;
@@ -249,7 +249,7 @@
Stack stack;
push(stack, 2.0f, at::ones(5));
- op->getOperation()(&stack);
+ op->getOperation()(stack);
at::Tensor output;
pop(stack, output);
diff --git a/test/cpp/jit/test_misc.cpp b/test/cpp/jit/test_misc.cpp
index 9f8a732..305d36a 100644
--- a/test/cpp/jit/test_misc.cpp
+++ b/test/cpp/jit/test_misc.cpp
@@ -1493,11 +1493,11 @@
RegisterOperators reg({
Operator(
"prim::test_none() -> int?",
- [](Stack* stack) { push(stack, IValue()); },
+ [](Stack& stack) { push(stack, IValue()); },
aliasAnalysisFromSchema()),
Operator(
"prim::is_none(int? a) -> bool",
- [](Stack* stack) {
+ [](Stack& stack) {
IValue a = pop(stack);
if (a.isNone()) {
push(stack, true);
diff --git a/test/cpp/jit/test_schema_matching.cpp b/test/cpp/jit/test_schema_matching.cpp
index 31d332b..c56d0bc 100644
--- a/test/cpp/jit/test_schema_matching.cpp
+++ b/test/cpp/jit/test_schema_matching.cpp
@@ -15,7 +15,7 @@
RegisterOperators reg({
Operator(
"aten::test_vartype(t[] a, t b) -> (t)",
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<double> list;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
@@ -54,7 +54,7 @@
RegisterOperators reg({
Operator(
"aten::test_vartype2(t a, t[] b) -> (t[])",
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
c10::List<double> list;
diff --git a/test/cpp/jit/test_utils.cpp b/test/cpp/jit/test_utils.cpp
index f2fb9e1..8da101e 100644
--- a/test/cpp/jit/test_utils.cpp
+++ b/test/cpp/jit/test_utils.cpp
@@ -273,7 +273,7 @@
// because it always produces empty Tensors.
Operator(
"prim::MakeTestTensor() -> Tensor",
- [](Stack* stack) { push(stack, at::Tensor()); },
+ [](Stack& stack) { push(stack, at::Tensor()); },
aliasAnalysisFromSchema()),
});
} // namespace
diff --git a/test/custom_operator/test_custom_ops.cpp b/test/custom_operator/test_custom_ops.cpp
index 7c6a187..ec22568 100644
--- a/test/custom_operator/test_custom_ops.cpp
+++ b/test/custom_operator/test_custom_ops.cpp
@@ -30,7 +30,7 @@
torch::jit::Stack stack;
torch::jit::push(stack, std::forward<Args>(args)...);
- op->getOperation()(&stack);
+ op->getOperation()(stack);
TORCH_INTERNAL_ASSERT(1 == stack.size());
return torch::jit::pop(stack).to<Result>();
diff --git a/torch/csrc/autograd/record_function_ops.cpp b/torch/csrc/autograd/record_function_ops.cpp
index 7e621f9..9650c35 100644
--- a/torch/csrc/autograd/record_function_ops.cpp
+++ b/torch/csrc/autograd/record_function_ops.cpp
@@ -79,7 +79,7 @@
jit::RegisterOperators reg_fut_ops({
jit::Operator(
"profiler::_call_end_callbacks_on_jit_fut(Tensor x, Future(t) y) -> Future(t)",
- [](jit::Stack* stack) {
+ [](jit::Stack& stack) {
// Pop inputs, which should be a future and a tensor
auto fut = jit::pop(stack).toFuture();
auto tensor = jit::pop(stack).toTensor();
diff --git a/torch/csrc/distributed/rpc/request_callback_no_python.cpp b/torch/csrc/distributed/rpc/request_callback_no_python.cpp
index 07d5c61..5eada8d 100644
--- a/torch/csrc/distributed/rpc/request_callback_no_python.cpp
+++ b/torch/csrc/distributed/rpc/request_callback_no_python.cpp
@@ -582,7 +582,7 @@
std::vector<c10::Stream> streams) const {
c10::MultiStreamGuard guard(streams);
try {
- op.getOperation()(&stack);
+ op.getOperation()(stack);
} catch (const std::exception&) {
return asFuture(std::current_exception());
}
diff --git a/torch/csrc/jit/codegen/cuda/interface.cpp b/torch/csrc/jit/codegen/cuda/interface.cpp
index 009ae21..cf8f378 100644
--- a/torch/csrc/jit/codegen/cuda/interface.cpp
+++ b/torch/csrc/jit/codegen/cuda/interface.cpp
@@ -182,8 +182,8 @@
Operator(
prim::CudaFusionGroup,
[](const Node* node) -> Operation {
- return [node](Stack* stack) {
- fuser::cuda::runFusionGroup(node, *stack);
+ return [node](Stack& stack) {
+ fuser::cuda::runFusionGroup(node, stack);
};
},
aliasAnalysisSpecialCase()),
@@ -196,7 +196,7 @@
// if we would ever return refined tensor, which would change aliasing
// analysis, we should update aliasdb pass.
[](const Node* node) -> Operation {
- return [node](Stack* stack) {
+ return [node](Stack& stack) {
// TODO: check latency here!!!!
std::vector<TypePtr> types = node->tys(attr::types);
const auto num_inputs = types.size();
diff --git a/torch/csrc/jit/codegen/fuser/fallback.cpp b/torch/csrc/jit/codegen/fuser/fallback.cpp
index 59fe7e6..60a5d72 100644
--- a/torch/csrc/jit/codegen/fuser/fallback.cpp
+++ b/torch/csrc/jit/codegen/fuser/fallback.cpp
@@ -26,7 +26,7 @@
[](const Node* node) -> Operation {
int64_t dim = node->i(attr::dim);
int64_t num_inputs = node->inputs().size();
- return [dim, num_inputs](Stack* stack) {
+ return [dim, num_inputs](Stack& stack) {
auto result = at::cat(
fmap(
last(stack, num_inputs),
diff --git a/torch/csrc/jit/mobile/function.cpp b/torch/csrc/jit/mobile/function.cpp
index 0775a55..127bd5f 100644
--- a/torch/csrc/jit/mobile/function.cpp
+++ b/torch/csrc/jit/mobile/function.cpp
@@ -67,7 +67,7 @@
auto jit_op = findOperatorFor(opname);
std::vector<c10::Argument> args;
if (jit_op) {
- fn = [jit_op](Stack& stack) { jit_op->getOperation()(&stack); };
+ fn = [jit_op](Stack& stack) { jit_op->getOperation()(stack); };
args = jit_op->schema().arguments();
} else {
auto op = c10::Dispatcher::singleton().findSchema(opname_c10);
diff --git a/torch/csrc/jit/passes/batch_mm.cpp b/torch/csrc/jit/passes/batch_mm.cpp
index 815a1bc..944e278 100644
--- a/torch/csrc/jit/passes/batch_mm.cpp
+++ b/torch/csrc/jit/passes/batch_mm.cpp
@@ -109,11 +109,11 @@
RegisterOperators mm_tree_reduction_reg({Operator(
"prim::MMTreeReduce(...) -> Tensor",
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
std::vector<at::Tensor> inputs;
inputs.reserve(num_inputs);
- for (auto it = stack->end() - num_inputs; it != stack->end(); ++it) {
+ for (auto it = stack.end() - num_inputs; it != stack.end(); ++it) {
inputs.push_back(std::move(*it).toTensor());
}
drop(stack, num_inputs);
@@ -320,11 +320,11 @@
[](const Node* node) -> Operation {
size_t num_other_side_inputs = node->inputs().size() - 1;
Side single_side = static_cast<Side>(node->i(Symbol::attr("side")));
- return [num_other_side_inputs, single_side](Stack* stack) {
+ return [num_other_side_inputs, single_side](Stack& stack) {
at::Tensor side_input;
std::vector<at::Tensor> other_side_inputs;
other_side_inputs.reserve(num_other_side_inputs);
- for (auto it = stack->end() - num_other_side_inputs; it != stack->end();
+ for (auto it = stack.end() - num_other_side_inputs; it != stack.end();
++it) {
other_side_inputs.push_back(std::move(*it).toTensor());
}
@@ -343,18 +343,18 @@
mm_out,
num_other_side_inputs,
/*dim=*/single_side == Side::LHS ? 1 : 0);
- stack->insert(
- stack->end(),
+ stack.insert(
+ stack.end(),
std::make_move_iterator(outputs.begin()),
std::make_move_iterator(outputs.end()));
} else {
if (single_side == Side::LHS) {
for (at::Tensor& other : other_side_inputs) {
- stack->emplace_back(side_input.mm(other));
+ stack.emplace_back(side_input.mm(other));
}
} else {
for (at::Tensor& other : other_side_inputs) {
- stack->emplace_back(other.mm(side_input));
+ stack.emplace_back(other.mm(side_input));
}
}
}
diff --git a/torch/csrc/jit/passes/constant_propagation.cpp b/torch/csrc/jit/passes/constant_propagation.cpp
index a7f831a..3a28eae 100644
--- a/torch/csrc/jit/passes/constant_propagation.cpp
+++ b/torch/csrc/jit/passes/constant_propagation.cpp
@@ -78,7 +78,7 @@
try {
auto op = n->getOperation();
- op(&stack);
+ op(stack);
} catch (...) {
return c10::nullopt;
}
diff --git a/torch/csrc/jit/passes/decompose_ops.cpp b/torch/csrc/jit/passes/decompose_ops.cpp
index 7f935a1..0706c9c 100644
--- a/torch/csrc/jit/passes/decompose_ops.cpp
+++ b/torch/csrc/jit/passes/decompose_ops.cpp
@@ -59,7 +59,7 @@
RegisterOperators reg_ops(
{Operator(
"aten::_ncf_unsqueeze(Tensor(a) self, int ndim) -> Tensor(a)",
- [](Stack* stack) {
+ [](Stack& stack) {
const int64_t ndim = pop(stack).toInt();
auto self = pop(stack).toTensor();
c10::SmallVector<int64_t, 8> sizes(ndim, 1);
@@ -70,7 +70,7 @@
aliasAnalysisFromSchema()),
Operator(
"aten::_ncf_view(Tensor(a) self, int[] input_shape, int normalized_ndim) -> Tensor(a)",
- [](Stack* stack) {
+ [](Stack& stack) {
const int64_t normalized_ndim = pop(stack).toInt();
auto input_shape = pop(stack).toIntList();
auto self = pop(stack).toTensor();
diff --git a/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp b/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp
index 6d218af..542e136 100644
--- a/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp
+++ b/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp
@@ -235,7 +235,7 @@
Operation createUnaryOp(
std::function<void(at::Tensor output, at::Tensor input)> aten_op,
bool inplace = false) {
- return [aten_op, inplace](Stack* stack) {
+ return [aten_op, inplace](Stack& stack) {
auto a = pop(stack).toTensor();
c10::impl::ExcludeDispatchKeyGuard edkg(c10::autograd_dispatch_keyset);
// we cast `a` to an `ideep::tensor`, so we can get at its descriptor
@@ -275,7 +275,7 @@
};
}
-void MKLDNNLayerNormOp(Stack* stack, bool inplace) {
+void MKLDNNLayerNormOp(Stack& stack, bool inplace) {
c10::impl::ExcludeDispatchKeyGuard edkg(c10::autograd_dispatch_keyset);
// enable_cudnn not used
@@ -303,7 +303,7 @@
};
Operation BroadOp(const Node* node) {
- return [](Stack* stack) {
+ return [](Stack& stack) {
auto b = pop(stack).toTensor();
auto a = pop(stack).toTensor();
auto b_size = b.sizes();
@@ -471,17 +471,17 @@
const RegisterOperators MKLDNNLayerNormOpReg({
torch::jit::Operator(
"prim::MKLDNNLayerNorm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor",
- [](Stack* stack) { MKLDNNLayerNormOp(stack, false); },
+ [](Stack& stack) { MKLDNNLayerNormOp(stack, false); },
AliasAnalysisKind::FROM_SCHEMA),
torch::jit::Operator(
"prim::MKLDNNLayerNorm_(Tensor(a!) input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor(a!)",
- [](Stack* stack) { MKLDNNLayerNormOp(stack, true); },
+ [](Stack& stack) { MKLDNNLayerNormOp(stack, true); },
AliasAnalysisKind::FROM_SCHEMA),
});
Operation ConstantMKLDNNTensorOp(const Node* node) {
const auto& t = node->t(attr::value);
- return [t](Stack* stack) {
+ return [t](Stack& stack) {
push(stack, t);
return 0;
};
@@ -509,7 +509,7 @@
// XXX: this follows the schema convention of conv2d/conv3d, not
// aten::mkldnn_convolution, which is different for some reason!
"prim::mkldnn_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor",
- [](jit::Stack* stack) {
+ [](jit::Stack& stack) {
int64_t groups = pop(stack).toInt();
auto dilation = pop(stack).toIntVector();
auto padding = pop(stack).toIntVector();
@@ -558,7 +558,7 @@
// in default bindings
jit::Operator(
"prim::MKLDNNScalarMul(Tensor self, Scalar other) -> Tensor",
- [](jit::Stack* stack) {
+ [](jit::Stack& stack) {
c10::impl::ExcludeDispatchKeyGuard edkg(
c10::autograd_dispatch_keyset);
float other = pop(stack).toScalar().toFloat();
@@ -576,7 +576,7 @@
aliasAnalysisFromSchema()),
jit::Operator(
"prim::MKLDNNScalarMul_(Tensor(a!) self, Scalar other) -> Tensor(a!)",
- [](jit::Stack* stack) {
+ [](jit::Stack& stack) {
c10::impl::ExcludeDispatchKeyGuard edkg(
c10::autograd_dispatch_keyset);
float other = pop(stack).toScalar().toFloat();
diff --git a/torch/csrc/jit/passes/shape_analysis.cpp b/torch/csrc/jit/passes/shape_analysis.cpp
index 47cd30b..5e13829 100644
--- a/torch/csrc/jit/passes/shape_analysis.cpp
+++ b/torch/csrc/jit/passes/shape_analysis.cpp
@@ -410,7 +410,7 @@
// is to uncover any mistakes we could make when editing this code,
// and eventually it shouldn't matter, because this phase should be
// preceded by schema checking.
- op(&stack);
+ op(stack);
AT_ASSERT(stack.size() == node->outputs().size());
for (const auto i : c10::irange(stack.size())) {
diff --git a/torch/csrc/jit/passes/tensorexpr_fuser.cpp b/torch/csrc/jit/passes/tensorexpr_fuser.cpp
index 833c338..1d5128c 100644
--- a/torch/csrc/jit/passes/tensorexpr_fuser.cpp
+++ b/torch/csrc/jit/passes/tensorexpr_fuser.cpp
@@ -1299,9 +1299,9 @@
Operation createTensorExprOp(const Node* node) {
auto kernel =
std::make_shared<tensorexpr::TensorExprKernel>(node->g(attr::Subgraph));
- return [kernel](Stack* stack) {
+ return [kernel](Stack& stack) {
RECORD_FUNCTION("TensorExpr", std::vector<c10::IValue>());
- kernel->run(*stack);
+ kernel->run(stack);
return 0;
};
}
diff --git a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp
index cd894b4..ae3a962 100644
--- a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp
+++ b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp
@@ -253,7 +253,7 @@
const auto inputsDeepCopy = deepCopy(stack);
// Run the op
- node->getOperation()(&stack);
+ node->getOperation()(stack);
const auto outputs = std::move(stack);
diff --git a/torch/csrc/jit/python/pybind_utils.h b/torch/csrc/jit/python/pybind_utils.h
index 0138231..eff1ddc 100644
--- a/torch/csrc/jit/python/pybind_utils.h
+++ b/torch/csrc/jit/python/pybind_utils.h
@@ -1151,7 +1151,7 @@
Stack stack = std::get<1>(opWithStack);
{
pybind11::gil_scoped_release no_gil_guard;
- found_op->getOperation()(&stack);
+ found_op->getOperation()(stack);
}
return createPyObjectForStack(std::move(stack));
diff --git a/torch/csrc/jit/python/python_interpreter.cpp b/torch/csrc/jit/python/python_interpreter.cpp
index 82a0d22..29b7929 100644
--- a/torch/csrc/jit/python/python_interpreter.cpp
+++ b/torch/csrc/jit/python/python_interpreter.cpp
@@ -43,7 +43,7 @@
AT_ASSERT(op->outputs().size() == 1);
- return [=](Stack* stack) {
+ return [=](Stack& stack) {
pybind11::gil_scoped_acquire gil;
py::tuple py_inputs(op->cconv.size());
size_t i = 0;
@@ -66,7 +66,7 @@
drop(stack, num_inputs);
try {
py::object py_output(func(*py_inputs));
- stack->push_back(returnToIValue(op->output()->type(), py_output));
+ stack.push_back(returnToIValue(op->output()->type(), py_output));
} catch (py::error_already_set& e) {
throw std::runtime_error(e.what());
}
diff --git a/torch/csrc/jit/runtime/graph_executor.cpp b/torch/csrc/jit/runtime/graph_executor.cpp
index 4768826..0187988 100644
--- a/torch/csrc/jit/runtime/graph_executor.cpp
+++ b/torch/csrc/jit/runtime/graph_executor.cpp
@@ -377,7 +377,7 @@
num_outputs(this->grad.f->outputs().size()) {}
// XXX: keep in mind that stack can be larger than the inputs we need!
- void operator()(Stack* stack) const {
+ void operator()(Stack& stack) const {
auto grad_fn = std::make_shared<DifferentiableGraphBackward>(
grad_executor,
grad.df_input_vjps.size(),
@@ -394,13 +394,13 @@
captureInputs(*grad_fn, inputs);
}
- detachVariables(*stack);
+ detachVariables(stack);
if (IsNewExecutorEnabled()) {
ExecutionPlan plan =
- f_ptr->getPlanFor(*stack, GraphExecutor::getDefaultNumBailOuts());
- InterpreterState(plan.code).run(*stack);
+ f_ptr->getPlanFor(stack, GraphExecutor::getDefaultNumBailOuts());
+ InterpreterState(plan.code).run(stack);
} else {
- InterpreterState(legacy_f).run(*stack);
+ InterpreterState(legacy_f).run(stack);
}
{
@@ -419,7 +419,7 @@
// drop the temporary outputs so that we return the same number of
// outputs as if we were not also calculating gradient
const size_t num_temporary_outputs = num_outputs - grad.f_real_outputs;
- stack->erase(stack->end() - num_temporary_outputs, stack->end());
+ stack.erase(stack.end() - num_temporary_outputs, stack.end());
}
}
diff --git a/torch/csrc/jit/runtime/interpreter.cpp b/torch/csrc/jit/runtime/interpreter.cpp
index be2019e..70c9c6c 100644
--- a/torch/csrc/jit/runtime/interpreter.cpp
+++ b/torch/csrc/jit/runtime/interpreter.cpp
@@ -297,13 +297,13 @@
}
case INST(OP): {
INST_GUARD;
- frame.function->operator_table_[inst.X](&stack);
+ frame.function->operator_table_[inst.X](stack);
}
INST_NEXT;
case INST(OPN): {
INST_GUARD;
stack.push_back(inst.N);
- frame.function->operator_table_[inst.X](&stack);
+ frame.function->operator_table_[inst.X](stack);
}
INST_NEXT;
case INST(LOAD): {
diff --git a/torch/csrc/jit/runtime/register_c10_ops.cpp b/torch/csrc/jit/runtime/register_c10_ops.cpp
index 993d411..4d541ec 100644
--- a/torch/csrc/jit/runtime/register_c10_ops.cpp
+++ b/torch/csrc/jit/runtime/register_c10_ops.cpp
@@ -12,7 +12,7 @@
namespace {
Operator createOperatorFromC10(const c10::OperatorHandle& op) {
- return Operator(op, [op](Stack* stack) { op.callBoxed(stack); });
+ return Operator(op, [op](Stack& stack) { op.callBoxed(stack); });
}
class RegistrationListener final : public c10::OpRegistrationListener {
diff --git a/torch/csrc/jit/runtime/register_cuda_ops.cpp b/torch/csrc/jit/runtime/register_cuda_ops.cpp
index f7a989d..599fd53 100644
--- a/torch/csrc/jit/runtime/register_cuda_ops.cpp
+++ b/torch/csrc/jit/runtime/register_cuda_ops.cpp
@@ -38,7 +38,7 @@
RegisterOperators const reg({
Operator(
"cuda::current_stream.device(Device? device) -> __torch__.torch.classes.cuda.Stream",
- [](Stack* stack) {
+ [](Stack& stack) {
auto device = pop(stack).toOptional<c10::Device>();
c10::DeviceIndex device_index = device.has_value()
? device->index()
@@ -50,7 +50,7 @@
aliasAnalysisFromSchema()),
Operator(
"cuda::current_stream.int(int? val) -> __torch__.torch.classes.cuda.Stream",
- [](Stack* stack) {
+ [](Stack& stack) {
auto idx = pop(stack).toOptional<int64_t>();
c10::DeviceIndex device_index = idx.has_value()
? static_cast<c10::DeviceIndex>(idx.value())
@@ -62,7 +62,7 @@
aliasAnalysisFromSchema()),
Operator(
"cuda::default_stream.device(Device? device) -> __torch__.torch.classes.cuda.Stream",
- [](Stack* stack) {
+ [](Stack& stack) {
auto device = pop(stack).toOptional<c10::Device>();
c10::DeviceIndex device_index = device.has_value()
? device->index()
@@ -74,7 +74,7 @@
aliasAnalysisFromSchema()),
Operator(
"cuda::default_stream.int(int? val) -> __torch__.torch.classes.cuda.Stream",
- [](Stack* stack) {
+ [](Stack& stack) {
auto idx = pop(stack).toOptional<int64_t>();
c10::DeviceIndex device_index = idx.has_value()
? static_cast<c10::DeviceIndex>(idx.value())
@@ -86,14 +86,14 @@
aliasAnalysisFromSchema()),
Operator(
"cuda::_current_device() -> int",
- [](Stack* stack) {
+ [](Stack& stack) {
auto v = c10::cuda::current_device();
push(stack, static_cast<int>(v));
},
aliasAnalysisFromSchema()),
Operator(
"cuda::_set_device(int64_t val) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
int64_t idx = -1;
pop(stack, idx);
c10::cuda::set_device(static_cast<c10::DeviceIndex>(idx));
@@ -101,7 +101,7 @@
aliasAnalysisFromSchema()),
Operator(
"cuda::device_index(Device device) -> int",
- [](Stack* stack) {
+ [](Stack& stack) {
auto device = pop(stack);
auto idx = device.toDevice().index();
push(stack, idx);
@@ -109,11 +109,11 @@
aliasAnalysisFromSchema()),
Operator(
"cuda::device_count() -> int",
- [](Stack* stack) { push(stack, at::cuda::device_count()); },
+ [](Stack& stack) { push(stack, at::cuda::device_count()); },
aliasAnalysisFromSchema()),
Operator(
"cuda::set_stream(__torch__.torch.classes.cuda.Stream stream) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
auto v = pop(stack);
auto s = v.toCustomClass<torch::jit::CUDAStream>();
auto stream_device_idx = static_cast<int64_t>(s->device_index());
@@ -141,11 +141,11 @@
aliasAnalysisFromSchema()),
Operator(
"cuda::synchronize() -> ()",
- [](Stack* stack) { c10::cuda::device_synchronize(); },
+ [](Stack& stack) { c10::cuda::device_synchronize(); },
aliasAnalysisFromSchema()),
Operator(
"cuda::synchronize.device(Device? device) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
auto device = pop(stack).toOptional<c10::Device>();
c10::DeviceIndex device_index = device.has_value()
? device->index()
@@ -155,7 +155,7 @@
aliasAnalysisFromSchema()),
Operator(
"cuda::synchronize.int(int? val) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
auto idx = pop(stack).toOptional<int64_t>();
c10::DeviceIndex device_index = idx.has_value()
? static_cast<c10::DeviceIndex>(idx.value())
diff --git a/torch/csrc/jit/runtime/register_distributed_ops.cpp b/torch/csrc/jit/runtime/register_distributed_ops.cpp
index 2c8277d..edf7a0c 100644
--- a/torch/csrc/jit/runtime/register_distributed_ops.cpp
+++ b/torch/csrc/jit/runtime/register_distributed_ops.cpp
@@ -29,11 +29,11 @@
// prepare the rpc input arguments and call the C++ impls
void prepare_and_call_rpc_op(
- Stack* stack,
+ Stack& stack,
int num_inputs,
const std::string& rpc_op) {
// Get inputs from the stack.
- auto stackIter = stack->end() - num_inputs;
+ auto stackIter = stack.end() - num_inputs;
auto& dstWorkerIValue = *stackIter++;
auto& qualifiedNameIValue = *stackIter++;
IValue emptyTuple(c10::ivalue::Tuple::create({}));
@@ -137,7 +137,7 @@
rpcTimeout);
// Push output to the stack.
drop(stack, num_inputs);
- stack->emplace_back(std::move(futureIValuePtr));
+ stack.emplace_back(std::move(futureIValuePtr));
} else if (rpc_op == "rpc_sync") {
// Send RPC request.
auto futureIValuePtr = dist_rpc::rpcTorchscript(
@@ -154,7 +154,7 @@
auto res = futureIValuePtr->value();
// Push output to the stack.
drop(stack, num_inputs);
- stack->emplace_back(std::move(res));
+ stack.emplace_back(std::move(res));
}
} else if (rpc_op == "rpc_remote") {
auto rrefPtr = dist_rpc::remoteTorchscript(
@@ -165,7 +165,7 @@
rpcTimeout);
// Push output to the stack.
drop(stack, num_inputs);
- stack->emplace_back(
+ stack.emplace_back(
c10::static_intrusive_pointer_cast<c10::RRefInterface>(rrefPtr));
} else {
throw std::runtime_error(
@@ -178,7 +178,7 @@
fmt::format(
"aten::to_here(RRef(t) self, float timeout = {}) -> t(*)",
torch::distributed::rpc::kDefaultRpcTimeoutSeconds),
- [](Stack* stack) {
+ [](Stack& stack) {
auto timeout = pop(stack).toDouble();
auto rref = pop(stack).toRRef();
IValue res;
@@ -195,7 +195,7 @@
aliasAnalysisFromSchema()),
Operator(
"aten::local_value(RRef(t) self) -> t(*)",
- [](Stack* stack) {
+ [](Stack& stack) {
auto rref = pop(stack).toRRef();
TORCH_CHECK(
rref->isOwner(),
@@ -208,14 +208,14 @@
aliasAnalysisFromSchema()),
Operator(
"aten::is_owner(RRef(t) self) -> bool",
- [](Stack* stack) {
+ [](Stack& stack) {
auto rref = pop(stack).toRRef();
push(stack, rref->isOwner());
},
aliasAnalysisFromSchema()),
Operator(
"aten::owner(RRef(t) self) -> __torch__.torch.classes.dist_rpc.WorkerInfo",
- [](Stack* stack) {
+ [](Stack& stack) {
auto rref = pop(stack).toRRef();
push(
stack,
@@ -225,21 +225,21 @@
aliasAnalysisFromSchema()),
Operator(
"aten::owner_name(RRef(t) self) -> str",
- [](Stack* stack) {
+ [](Stack& stack) {
auto rref = pop(stack).toRRef();
push(stack, rref->ownerName());
},
aliasAnalysisFromSchema()),
Operator(
"aten::confirmed_by_owner(RRef(t) self) -> bool",
- [](Stack* stack) {
+ [](Stack& stack) {
auto rref = pop(stack).toRRef();
push(stack, rref->confirmedByOwner());
},
aliasAnalysisFromSchema()),
Operator(
"aten::dist_backward(int context_id, Tensor[] roots, bool retain_graph=False) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
bool retain_graph = pop(stack).toBool();
auto roots_list = pop(stack).toTensorList();
int64_t context_id = pop(stack).toInt();
@@ -252,7 +252,7 @@
prim::rpc_sync,
[](const Node* node) -> Operation {
int num_inputs = node->inputs().size();
- return [num_inputs](Stack* stack) {
+ return [num_inputs](Stack& stack) {
prepare_and_call_rpc_op(stack, num_inputs, "rpc_sync");
};
},
@@ -261,7 +261,7 @@
prim::rpc_remote,
[](const Node* node) -> Operation {
int num_inputs = node->inputs().size();
- return [num_inputs](Stack* stack) {
+ return [num_inputs](Stack& stack) {
prepare_and_call_rpc_op(stack, num_inputs, "rpc_remote");
};
},
@@ -270,7 +270,7 @@
prim::rpc_async,
[](const Node* node) -> Operation {
int num_inputs = node->inputs().size();
- return [num_inputs](Stack* stack) {
+ return [num_inputs](Stack& stack) {
prepare_and_call_rpc_op(stack, num_inputs, "rpc_async");
};
},
diff --git a/torch/csrc/jit/runtime/register_ops_utils.cpp b/torch/csrc/jit/runtime/register_ops_utils.cpp
index 91ff2c7..64bb3ab 100644
--- a/torch/csrc/jit/runtime/register_ops_utils.cpp
+++ b/torch/csrc/jit/runtime/register_ops_utils.cpp
@@ -13,7 +13,7 @@
}
template <>
-void listIndex<at::Tensor>(Stack* stack) {
+void listIndex<at::Tensor>(Stack& stack) {
at::Tensor elem = pop(stack).to<at::Tensor>();
c10::List<at::Tensor> list = pop(stack).to<c10::List<at::Tensor>>();
@@ -31,7 +31,7 @@
}
template <>
-void listCount<at::Tensor>(Stack* stack) {
+void listCount<at::Tensor>(Stack& stack) {
at::Tensor elem = pop(stack).to<at::Tensor>();
c10::List<at::Tensor> list = pop(stack).to<c10::List<at::Tensor>>();
@@ -44,21 +44,21 @@
}
template <>
-void listEq<at::Tensor>(Stack* stack) {
+void listEq<at::Tensor>(Stack& stack) {
c10::List<at::Tensor> b = pop(stack).to<c10::List<at::Tensor>>();
c10::List<at::Tensor> a = pop(stack).to<c10::List<at::Tensor>>();
push(stack, tensor_list_equal(a, b));
}
template <>
-void listNe<at::Tensor>(Stack* stack) {
+void listNe<at::Tensor>(Stack& stack) {
c10::List<at::Tensor> b = pop(stack).to<c10::List<at::Tensor>>();
c10::List<at::Tensor> a = pop(stack).to<c10::List<at::Tensor>>();
push(stack, !tensor_list_equal(a, b));
}
template <>
-void listSort<at::Tensor>(Stack* stack) {
+void listSort<at::Tensor>(Stack& stack) {
bool reverse = pop(stack).toBool();
c10::List<at::Tensor> list = pop(stack).toTensorList();
std::sort(
@@ -74,7 +74,7 @@
}
template <>
-void listCopyAndSort<at::Tensor>(Stack* stack) {
+void listCopyAndSort<at::Tensor>(Stack& stack) {
c10::List<at::Tensor> list = pop(stack).toTensorList();
auto list_copied = list.copy();
std::sort(
@@ -87,7 +87,7 @@
}
template <>
-void listRemove<at::Tensor>(Stack* stack) {
+void listRemove<at::Tensor>(Stack& stack) {
at::Tensor elem = pop(stack).to<at::Tensor>();
c10::List<at::Tensor> list = pop(stack).to<c10::List<at::Tensor>>();
@@ -268,7 +268,7 @@
return idx;
}
-void listAppend(Stack* stack) {
+void listAppend(Stack& stack) {
IValue el = pop(stack).to<IValue>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
@@ -276,13 +276,13 @@
push(stack, std::move(list));
}
-void listReverse(Stack* stack) {
+void listReverse(Stack& stack) {
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
std::reverse(list.begin(), list.end());
}
-void listPopImpl(Stack* stack, const char* empty_message) {
+void listPopImpl(Stack& stack, const char* empty_message) {
int64_t idx = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
@@ -297,22 +297,22 @@
list.erase(list.begin() + normalized_idx);
}
-void listPop(Stack* stack) {
+void listPop(Stack& stack) {
return listPopImpl(stack, "pop from empty list");
}
-void listClear(Stack* stack) {
+void listClear(Stack& stack) {
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
list.clear();
}
-void listDelete(Stack* stack) {
+void listDelete(Stack& stack) {
listPopImpl(stack, "pop index out of range");
pop(stack);
}
-void listInsert(Stack* stack) {
+void listInsert(Stack& stack) {
IValue elem = pop(stack).to<IValue>();
int64_t idx = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
@@ -331,7 +331,7 @@
}
}
-void listExtend(Stack* stack) {
+void listExtend(Stack& stack) {
c10::List<IValue> b = pop(stack).to<c10::List<IValue>>();
c10::List<IValue> a = pop(stack).to<c10::List<IValue>>();
@@ -341,12 +341,12 @@
}
}
-void listCopy(Stack* stack) {
+void listCopy(Stack& stack) {
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
push(stack, list.copy());
}
-void listSelect(Stack* stack) {
+void listSelect(Stack& stack) {
int64_t idx = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
@@ -354,19 +354,19 @@
push(stack, std::move(element));
}
-void listLen(Stack* stack) {
+void listLen(Stack& stack) {
c10::List<IValue> a = pop(stack).to<c10::List<IValue>>();
const int64_t size = a.size();
push(stack, size);
}
-void listList(Stack* stack) {
+void listList(Stack& stack) {
c10::List<IValue> a = pop(stack).to<c10::List<IValue>>();
push(stack, a.copy());
}
-void listAdd(Stack* stack) {
+void listAdd(Stack& stack) {
c10::List<IValue> b = pop(stack).to<c10::List<IValue>>();
c10::List<IValue> a = pop(stack).to<c10::List<IValue>>();
@@ -383,14 +383,14 @@
push(stack, std::move(ret));
}
-void listInplaceAdd(Stack* stack) {
+void listInplaceAdd(Stack& stack) {
c10::List<IValue> b = pop(stack).to<List<IValue>>();
c10::List<IValue> a = pop(stack).to<List<IValue>>();
a.append(std::move(b));
push(stack, std::move(a));
}
-void listMulIntLeftInPlace(Stack* stack) {
+void listMulIntLeftInPlace(Stack& stack) {
int64_t n = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
if (n <= 0) {
@@ -408,7 +408,7 @@
push(stack, std::move(list));
}
-void listMulIntLeft(Stack* stack) {
+void listMulIntLeft(Stack& stack) {
int64_t n = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
@@ -426,7 +426,7 @@
push(stack, std::move(ret));
}
-void listMulIntRight(Stack* stack) {
+void listMulIntRight(Stack& stack) {
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
int64_t n = pop(stack).to<int64_t>();
@@ -444,7 +444,7 @@
push(stack, std::move(ret));
}
-void listSlice(Stack* stack) {
+void listSlice(Stack& stack) {
auto step_val = pop(stack);
auto end_val = pop(stack);
auto start_val = pop(stack);
@@ -477,7 +477,7 @@
push(stack, std::move(sliced_list));
}
-void listSetItem(Stack* stack) {
+void listSetItem(Stack& stack) {
IValue value = pop(stack).to<IValue>();
int64_t idx = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
diff --git a/torch/csrc/jit/runtime/register_ops_utils.h b/torch/csrc/jit/runtime/register_ops_utils.h
index 5d00872..a4efb67 100644
--- a/torch/csrc/jit/runtime/register_ops_utils.h
+++ b/torch/csrc/jit/runtime/register_ops_utils.h
@@ -55,7 +55,7 @@
template <>
c10::impl::GenericList make_result_list<IValue>(const TypePtr& elemType);
-inline void noop(Stack* n) {}
+inline void noop(Stack& n) {}
// As described in https://docs.python.org/3/library/functions.html#round
// When a number is exactly halfway between two integers, python builtin round
@@ -181,12 +181,12 @@
list.set(normalized_idx, std::forward<T>(value));
}
-void listAppend(Stack* stack);
+void listAppend(Stack& stack);
-void listReverse(Stack* stack);
+void listReverse(Stack& stack);
template <typename T>
-void minList(Stack* stack) {
+void minList(Stack& stack) {
c10::List<T> a = pop(stack).to<c10::List<T>>();
c10::List<T> b = pop(stack).to<c10::List<T>>();
@@ -204,7 +204,7 @@
}
template <typename T>
-void maxList(Stack* stack) {
+void maxList(Stack& stack) {
c10::List<T> a = pop(stack).to<c10::List<T>>();
c10::List<T> b = pop(stack).to<c10::List<T>>();
@@ -221,18 +221,18 @@
push(stack, b.size() > a.size() ? b : a);
}
-void listPopImpl(Stack* stack, const char* empty_message);
+void listPopImpl(Stack& stack, const char* empty_message);
-void listPop(Stack* stack);
+void listPop(Stack& stack);
-void listClear(Stack* stack);
+void listClear(Stack& stack);
-void listDelete(Stack* stack);
+void listDelete(Stack& stack);
-void listInsert(Stack* stack);
+void listInsert(Stack& stack);
template <typename T>
-void listRemove(Stack* stack) {
+void listRemove(Stack& stack) {
T elem = pop(stack).to<T>();
c10::List<T> list = pop(stack).to<c10::List<T>>();
@@ -246,7 +246,7 @@
}
template <typename T>
-void listMin(Stack* stack) {
+void listMin(Stack& stack) {
c10::List<T> list = pop(stack).to<c10::List<T>>();
size_t list_size = list.size();
if (list_size == 0) {
@@ -259,11 +259,11 @@
min_elem = elem < min_elem ? elem : min_elem;
}
- stack->push_back(min_elem);
+ stack.push_back(min_elem);
}
template <typename T>
-void listMax(Stack* stack) {
+void listMax(Stack& stack) {
c10::List<T> list = pop(stack).to<c10::List<T>>();
size_t list_size = list.size();
if (list_size == 0) {
@@ -276,14 +276,14 @@
max_elem = elem > max_elem ? elem : max_elem;
}
- stack->push_back(max_elem);
+ stack.push_back(max_elem);
}
template <>
-void listRemove<at::Tensor>(Stack* stack);
+void listRemove<at::Tensor>(Stack& stack);
template <typename T>
-void listIndex(Stack* stack) {
+void listIndex(Stack& stack) {
T elem = pop(stack).to<T>();
c10::List<T> list = pop(stack).to<c10::List<T>>();
@@ -297,10 +297,10 @@
}
template <>
-void listIndex<at::Tensor>(Stack* stack);
+void listIndex<at::Tensor>(Stack& stack);
template <typename T>
-void listCount(Stack* stack) {
+void listCount(Stack& stack) {
T elem = pop(stack).to<T>();
c10::List<T> list = pop(stack).to<c10::List<T>>();
@@ -309,25 +309,25 @@
}
template <>
-void listCount<at::Tensor>(Stack* stack);
+void listCount<at::Tensor>(Stack& stack);
-void listExtend(Stack* stack);
+void listExtend(Stack& stack);
-void listCopy(Stack* stack);
+void listCopy(Stack& stack);
-void listSelect(Stack* stack);
+void listSelect(Stack& stack);
-void listLen(Stack* stack);
+void listLen(Stack& stack);
template <typename T>
-void listEq(Stack* stack) {
+void listEq(Stack& stack) {
c10::List<T> b = pop(stack).to<c10::List<T>>();
c10::List<T> a = pop(stack).to<c10::List<T>>();
push(stack, a == b);
}
template <typename T>
-void listNe(Stack* stack) {
+void listNe(Stack& stack) {
c10::List<T> b = pop(stack).to<c10::List<T>>();
c10::List<T> a = pop(stack).to<c10::List<T>>();
push(stack, a != b);
@@ -357,16 +357,16 @@
// Specialization for at::Tensor, since it doesn't define operator==
template <>
-void listEq<at::Tensor>(Stack* stack);
+void listEq<at::Tensor>(Stack& stack);
// Specialization for at::Tensor, since it doesn't define operator==
template <>
-void listNe<at::Tensor>(Stack* stack);
+void listNe<at::Tensor>(Stack& stack);
-void listList(Stack* stack);
+void listList(Stack& stack);
template <typename T>
-void listContains(Stack* stack) {
+void listContains(Stack& stack) {
auto key = pop(stack).to<T>();
auto list = pop(stack).to<c10::List<T>>();
// NOLINTNEXTLINE(performance-implicit-conversion-in-loop)
@@ -379,20 +379,20 @@
push(stack, false);
}
-void listAdd(Stack* stack);
+void listAdd(Stack& stack);
-void listInplaceAdd(Stack* stack);
+void listInplaceAdd(Stack& stack);
-void listMulIntLeftInPlace(Stack* stack);
+void listMulIntLeftInPlace(Stack& stack);
-void listMulIntLeft(Stack* stack);
+void listMulIntLeft(Stack& stack);
-void listMulIntRight(Stack* stack);
+void listMulIntRight(Stack& stack);
-void listSlice(Stack* stack);
+void listSlice(Stack& stack);
template <typename T>
-void listSort(Stack* stack) {
+void listSort(Stack& stack) {
bool reverse = pop(stack).toBool();
c10::List<T> list = pop(stack).to<c10::List<T>>();
std::sort(list.begin(), list.end(), [reverse](const T& a, const T& b) {
@@ -408,10 +408,10 @@
// Specialization for at::Tensor
template <>
-void listSort<at::Tensor>(Stack* stack);
+void listSort<at::Tensor>(Stack& stack);
template <typename T>
-void listCopyAndSort(Stack* stack) {
+void listCopyAndSort(Stack& stack) {
c10::List<T> list = pop(stack).to<c10::List<T>>();
auto list_copied = list.copy();
std::sort(list_copied.begin(), list_copied.end(), [](const T& a, const T& b) {
@@ -426,22 +426,22 @@
// Specialization for at::Tensor
template <>
-void listCopyAndSort<at::Tensor>(Stack* stack);
+void listCopyAndSort<at::Tensor>(Stack& stack);
-void listSetItem(Stack* stack);
+void listSetItem(Stack& stack);
struct OperatorGeneratorArgs {
const char* schema_str;
bool isOperationCreator;
union {
- void (*operation)(Stack*);
+ void (*operation)(Stack&);
OperationCreator operationCreator;
};
AliasAnalysisKind aliasAnalysis;
explicit constexpr OperatorGeneratorArgs(
torch::detail::SelectiveStr<true> schema_str,
- void (*op)(Stack*),
+ void (*op)(Stack&),
AliasAnalysisKind aa)
: schema_str(schema_str),
isOperationCreator(false),
@@ -472,7 +472,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op \
".int_int(int a, int b) -> " #int_float_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a, b; \
pop(stack, a, b); \
push(stack, op); \
@@ -482,7 +482,7 @@
TORCH_SELECTIVE_SCHEMA( \
#aten_op \
".float_float(float a, float b) -> " #int_float_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a, b; \
pop(stack, a, b); \
push(stack, op); \
@@ -492,7 +492,7 @@
TORCH_SELECTIVE_SCHEMA( \
#aten_op \
".complex_complex(complex a, complex b) -> " #complex_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c10::complex<double> a, b; \
pop(stack, a, b); \
push(stack, op); \
@@ -503,7 +503,7 @@
#define DEFINE_GENERIC_OP(aten_op, int_op, float_op, int_result, float_result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a, b; \
pop(stack, a, b); \
push(stack, int_op); \
@@ -512,7 +512,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".float(float a, float b) -> " #float_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a, b; \
pop(stack, a, b); \
push(stack, float_op); \
@@ -523,7 +523,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op \
".int_float(int a, float b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a; \
double b; \
pop(stack, a, b); \
@@ -533,7 +533,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op \
".float_int(float a, int b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a; \
int64_t b; \
pop(stack, a, b); \
@@ -544,7 +544,7 @@
#define DEFINE_INT_OP(aten_op, op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> int"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a, b; \
pop(stack, a, b); \
push(stack, op); /* NOLINT(hicpp-signed-bitwise) */ \
@@ -554,7 +554,7 @@
#define DEFINE_STR_CMP_OP(aten_op, op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".str(str a, str b) -> bool"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
auto b = pop(stack).toStringRef(); \
auto a = pop(stack).toStringRef(); \
push(stack, op); \
@@ -570,7 +570,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op string_val \
"(Scalar a, Scalar b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
IValue x, y; \
pop(stack, x, y); \
if (x.isDouble()) { \
@@ -625,7 +625,7 @@
#define DEFINE_UNARY_INT_OP(aten_op, op, result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a; \
pop(stack, a); \
push(stack, op); \
@@ -635,7 +635,7 @@
#define DEFINE_UNARY_FLOAT_OP(aten_op, op, result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".float(float a) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a; \
pop(stack, a); \
push(stack, op); \
@@ -647,7 +647,7 @@
DEFINE_UNARY_FLOAT_OP(aten_op, op, float_result), \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
IValue x; \
pop(stack, x); \
if (x.isDouble()) { \
@@ -662,7 +662,7 @@
#define DEFINE_BOOL_OP(aten_op, op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".bool(bool a, bool b) -> bool"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
bool a, b; \
pop(stack, a, b); \
push(stack, op); \
@@ -671,7 +671,7 @@
#define DEFINE_STRING_OP(op_name, string_op, result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#op_name ".str(str a, str b) ->" #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
auto b = pop(stack).toStringRef(); \
auto a = pop(stack).toStringRef(); \
push(stack, string_op); \
@@ -685,7 +685,7 @@
#define DEFINE_UNARY_COMPLEX_OP(aten_op, op, result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".complex(complex a) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c10::complex<double> a; \
pop(stack, a); \
push(stack, op); \
@@ -709,7 +709,7 @@
DEFINE_UNARY_COMPLEX_OP(aten_op, op, complex_result), \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
IValue x; \
pop(stack, x); \
if (x.isDouble()) { \
@@ -739,7 +739,7 @@
complex_result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a, b; \
pop(stack, a, b); \
push(stack, int_op); \
@@ -748,7 +748,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".complex(complex a, complex b) -> " #complex_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c10::complex<double> a, b; \
pop(stack, a, b); \
push(stack, complex_op); \
@@ -757,7 +757,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".float(float a, float b) -> " #float_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a, b; \
pop(stack, a, b); \
push(stack, float_op); \
@@ -768,7 +768,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op \
".int_complex(int a, complex b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a; \
c10::complex<double> b; \
pop(stack, a, b); \
@@ -778,7 +778,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".complex_int(complex a, int b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c10::complex<double> a; \
int64_t b; \
pop(stack, a, b); \
@@ -790,7 +790,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".float_complex(float a, complex b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a; \
c10::complex<double> b; \
pop(stack, a, b); \
@@ -800,7 +800,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".complex_float(complex a, float b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c10::complex<double> a; \
double b; \
pop(stack, a, b); \
@@ -813,7 +813,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op string_val \
"(Scalar a, Scalar b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
IValue x, y; \
pop(stack, x, y); \
if (x.isComplexDouble()) { \
@@ -860,7 +860,7 @@
aten_op, int_op, float_op, complex_op, result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op "(Scalar a, Scalar b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
IValue x, y; \
pop(stack, x, y); \
if (x.isComplexDouble()) { \
diff --git a/torch/csrc/jit/runtime/register_prim_ops.cpp b/torch/csrc/jit/runtime/register_prim_ops.cpp
index 2953b68..9164471 100644
--- a/torch/csrc/jit/runtime/register_prim_ops.cpp
+++ b/torch/csrc/jit/runtime/register_prim_ops.cpp
@@ -89,7 +89,7 @@
static const OperatorGeneratorArgs opGenArgs[] = {
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::str(t elem) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
std::stringstream ss;
ss << pop(stack);
push(stack, ss.str());
@@ -97,7 +97,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::list(str t) -> str[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto str = pop(stack).toStringRef();
c10::List<std::string> chars;
chars.reserve(str.size());
@@ -109,7 +109,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::cpu(Tensor(a) self) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.cpu());
@@ -117,7 +117,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::layout(Tensor a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.layout());
@@ -128,7 +128,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::__range_length(int lo, int hi, int step) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t lo, hi, step;
pop(stack, lo, hi, step);
@@ -148,7 +148,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::__derive_index(int index, int start, int step) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t index, start, step;
pop(stack, index, start, step);
@@ -157,7 +157,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::TupleUnpack(Any tup) -> ..."),
- [](Stack* stack) { tupleUnpack(*stack); },
+ [](Stack& stack) { tupleUnpack(stack); },
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::unchecked_cast(t x) -> t"),
@@ -165,7 +165,7 @@
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::IntImplicit(Tensor a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
checkImplicitTensorToNum(a, /*to int*/ true);
@@ -174,7 +174,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ComplexImplicit(Tensor a) -> complex"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
checkImplicitTensorToNum(a, /*to int*/ false);
@@ -183,7 +183,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::FloatImplicit(Tensor a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
checkImplicitTensorToNum(a, /*to int*/ false);
@@ -192,7 +192,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ScalarImplicit(Tensor a) -> Scalar"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
checkImplicitTensorToNum(a, /*to int*/ false);
@@ -201,7 +201,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Bool.Tensor(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_nonzero());
@@ -209,7 +209,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Bool.int(int a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i;
pop(stack, i);
@@ -218,7 +218,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Bool.float(float a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double d;
pop(stack, d);
@@ -227,7 +227,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Int.Tensor(Tensor a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.item<int64_t>());
@@ -235,7 +235,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Int.bool(bool a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool b;
pop(stack, b);
@@ -244,7 +244,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Int.float(float a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double d;
pop(stack, d);
@@ -253,7 +253,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Int.Scalar(Scalar a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue scalar;
pop(stack, scalar);
if (scalar.isInt()) {
@@ -266,7 +266,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Int.str(str a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto s = pop(stack).toString();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::string::size_type sz;
@@ -283,7 +283,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Float.Tensor(Tensor a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.item<double>());
@@ -291,7 +291,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Float.Scalar(Scalar a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue scalar;
pop(stack, scalar);
if (scalar.isDouble()) {
@@ -305,7 +305,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Float.int(int a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i;
pop(stack, i);
@@ -314,7 +314,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Float.bool(bool a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool b;
pop(stack, b);
@@ -323,7 +323,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Float.str(str a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto s = pop(stack).toString();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::string::size_type sz;
@@ -340,7 +340,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Complex.Scalar(Scalar a) -> complex"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue scalar;
pop(stack, scalar);
if (scalar.isComplexDouble()) {
@@ -355,7 +355,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a, b;
pop(stack, a, b);
push(stack, c10::complex<double>(a.item<double>(), b.item<double>()));
@@ -363,21 +363,21 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::format(str self, ...) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
size_t num_inputs = pop(stack).toInt();
- format(*stack, num_inputs);
+ format(stack, num_inputs);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::einsum.sublist(Tensor a, ...) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
size_t num_inputs = pop(stack).toInt();
- einsum(*stack, num_inputs);
+ einsum(stack, num_inputs);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::NumToTensor.Scalar(Scalar a) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Scalar s;
pop(stack, s);
push(stack, at::scalar_to_tensor(s));
@@ -385,29 +385,29 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::RaiseException(str msg) -> ()"),
- [](Stack* stack) { throw JITException(pop(stack).toStringRef()); },
+ [](Stack& stack) { throw JITException(pop(stack).toStringRef()); },
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Size(int[] sizes) -> int[]"),
- [](Stack* stack) {},
+ [](Stack& stack) {},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::size(Tensor self) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto t = std::move(pop(stack)).toTensor();
pack(stack, t.sizes().vec());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::EnumName(AnyEnumType enum) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue e = pop(stack);
push(stack, e.toEnumHolder()->name());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::EnumValue.int(AnyEnumType enum) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue e = pop(stack);
push(stack, e.toEnumHolder()->value());
},
@@ -415,14 +415,14 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"prim::EnumValue.float(AnyEnumType enum) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue e = pop(stack);
push(stack, e.toEnumHolder()->value());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::EnumValue.str(AnyEnumType enum) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue e = pop(stack);
push(stack, e.toEnumHolder()->value());
},
@@ -431,7 +431,7 @@
// note the compiler knows to type TupleIndex more accurately than it
// is listed here.
TORCH_SELECTIVE_SCHEMA("prim::TupleIndex(Any tup, int i) -> Any"),
- [](Stack* stack) {
+ [](Stack& stack) {
int64_t index = pop(stack).toInt();
auto tuple = pop(stack).toTuple();
auto norm_index = normalizeIndex(index, tuple->elements().size());
@@ -439,7 +439,7 @@
norm_index > static_cast<int64_t>(tuple->elements().size())) {
throw std::out_of_range("Tuple list index out of range");
}
- stack->emplace_back(tuple->elements()[norm_index]);
+ stack.emplace_back(tuple->elements()[norm_index]);
},
aliasAnalysisSpecialCase()),
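Since Stack is std::vector<IValue>, taking it by reference lets ops use the container API directly (stack.emplace_back above replaces stack->emplace_back). A hedged helper sketch (hypothetical, not part of this patch):

void replaceTop(Stack& stack, IValue v) {
  stack.pop_back();                 // drop the current top of stack
  stack.emplace_back(std::move(v)); // push the replacement value
}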
OperatorGeneratorArgs(
@@ -453,11 +453,11 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::device(Tensor a) -> Device"),
- [](Stack* stack) { push(stack, pop(stack).toTensor().device()); },
+ [](Stack& stack) { push(stack, pop(stack).toTensor().device()); },
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::dtype(Tensor a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, static_cast<int64_t>(a.scalar_type()));
@@ -465,11 +465,11 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::__not__(bool self) -> bool"),
- [](Stack* stack) { push(stack, !pop(stack).toBool()); },
+ [](Stack& stack) { push(stack, !pop(stack).toBool()); },
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::__is__(t1 self, t2 obj) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue self, obj;
pop(stack, self, obj);
push(stack, self.is(obj));
@@ -477,7 +477,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::__isnot__(t1 self, t2 obj) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue self, obj;
pop(stack, self, obj);
push(stack, !self.is(obj));
@@ -485,28 +485,28 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::element_size(Tensor self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor arg = pop(stack).toTensor();
push(stack, arg.element_size());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::numel(Tensor self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor arg = pop(stack).toTensor();
push(stack, arg.numel());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::dim(Tensor self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor arg = pop(stack).toTensor();
push(stack, arg.dim());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::get_device(Tensor self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
RECORD_FUNCTION("get_device", std::vector<c10::IValue>());
auto result =
at::get_device((std::move(peek(stack, 0, 1))).toTensor());
@@ -516,7 +516,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::storage_offset(Tensor self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
RECORD_FUNCTION("storage_offset", std::vector<c10::IValue>());
auto result =
((std::move(peek(stack, 0, 1))).toTensor()).storage_offset();
@@ -526,7 +526,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::is_contiguous(Tensor self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
RECORD_FUNCTION("is_contiguous", std::vector<c10::IValue>());
auto result =
((std::move(peek(stack, 0, 1))).toTensor()).is_contiguous();
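The peek/drop pattern above reads arguments in place and removes them only after use: peek(stack, i, n) returns input i of the last n entries without popping. A minimal mirror of the idiom under the new signature (hypothetical op body, not from this patch):

[](Stack& stack) {
  // read input 0 of 1 without popping it
  auto numel = peek(stack, 0, 1).toTensor().numel();
  drop(stack, 1);      // now discard the consumed input
  pack(stack, numel);  // push the result
}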
@@ -623,7 +623,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::eq.device(Device a, Device b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto a = pop(stack).toDevice();
auto b = pop(stack).toDevice();
push(stack, a == b);
@@ -631,7 +631,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ne.device(Device a, Device b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto a = pop(stack).toDevice();
auto b = pop(stack).toDevice();
push(stack, a != b);
@@ -639,7 +639,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::eq.bool(bool a, bool b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto a = pop(stack);
auto b = pop(stack);
push(stack, a == b);
@@ -647,7 +647,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ne.bool(bool a, bool b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto a = pop(stack);
auto b = pop(stack);
push(stack, a != b);
@@ -655,11 +655,11 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::Uninitialized() -> Any"),
- [](Stack* stack) { push(stack, IValue::uninitialized()); },
+ [](Stack& stack) { push(stack, IValue::uninitialized()); },
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::Print(...) -> ()"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
std::stringstream ss;
bool first = true;
@@ -682,7 +682,7 @@
// prim::VarConcat(Tensors..., dim) -> Tensor
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::VarConcat(...) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
auto dim = pop(stack).toInt();
std::vector<at::Tensor> inputs(num_inputs - 1);
@@ -694,7 +694,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::VarStack(...) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
auto dim = pop(stack).toInt();
std::vector<at::Tensor> inputs(num_inputs - 1);
@@ -707,7 +707,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue x = pop(stack);
IValue y = pop(stack);
push(stack, x == y);
@@ -716,7 +716,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue x = pop(stack);
IValue y = pop(stack);
push(stack, x != y);
@@ -731,7 +731,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::dequantize.tensor(Tensor qtensor) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor qtensor;
pop(stack, qtensor);
push(stack, at::dequantize(qtensor));
@@ -740,14 +740,14 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::dequantize.list(Tensor[] qtensors) -> Tensor[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto qtensors = pop(stack).toTensorVector();
push(stack, at::dequantize(qtensors));
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::dequantize.any(Any tensors) -> Any"),
- [](Stack* stack) { dequantize(*stack); },
+ [](Stack& stack) { dequantize(stack); },
aliasAnalysisFromSchema()),
DEFINE_UNARY_OP_WITH_COMPLEX(aten::log, std::log(a), float, float),
DEFINE_STRING_OP(aten::add, a + b, str),
@@ -847,7 +847,7 @@
float),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::pow.int_to_int(int a, int b) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t a, b;
pop(stack, a, b);
@@ -860,7 +860,7 @@
DEFINE_BINARY_OP(prim::max, a > b ? a : b),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::type(Device self) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto d = pop(stack);
push(
stack, DeviceTypeName(d.toDevice().type(), /* lower_case=*/true));
@@ -869,7 +869,7 @@
// tensor length op (size of 1st dimension)
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::len.Tensor(Tensor t) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor t = pop(stack).toTensor();
if (t.dim() == 0) {
AT_ERROR("len() of a 0-d tensor");
@@ -879,7 +879,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ord(str string) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto string = pop(stack).toStringRef();
TORCH_CHECK(
string.size() == 1,
@@ -891,7 +891,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::lower(str self) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto string = pop(stack).toStringRef();
std::stringstream ss;
for (char c : string) {
@@ -912,14 +912,14 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::len.str(str s) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto string = pop(stack).toStringRef();
push(stack, static_cast<int64_t>(string.size()));
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::dict() -> Dict(str, Tensor)"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto dict =
c10::impl::GenericDict(StringType::get(), TensorType::get());
push(stack, dict);
@@ -928,7 +928,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::__getitem__.str(str s, int index) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto index = pop(stack).toInt();
auto string = pop(stack).toStringRef();
auto norm_index = normalizeIndex(index, string.size());
@@ -941,7 +941,7 @@
TORCH_SELECTIVE_SCHEMA("aten::copy_." #other_type \
"(Tensor(a!) self, " #other_type \
" other) -> Tensor(a!)"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
at::Tensor t; \
c_type other; \
pop(stack, t, other); \
@@ -957,7 +957,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::backward(Tensor self, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()"),
- [](Stack* stack) {
+ [](Stack& stack) {
bool create_graph = pop(stack).toBool();
auto retain_graph = pop(stack).toOptional<bool>();
IValue gradient_ivalue = pop(stack);
@@ -977,7 +977,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
auto self = pop(stack).toTensor();
auto result = at::index(self, indices);
@@ -987,7 +987,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::_index_put_impl_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto unsafe = pop(stack).toBool();
auto accumulate = pop(stack).toBool();
auto values = pop(stack).toTensor();
@@ -1001,7 +1001,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::index_put_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto accumulate = pop(stack).toBool();
auto values = pop(stack).toTensor();
auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
@@ -1013,7 +1013,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto accumulate = pop(stack).toBool();
auto values = pop(stack).toTensor();
auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
@@ -1026,7 +1026,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool non_blocking;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@@ -1044,7 +1044,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool non_blocking;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@@ -1060,7 +1060,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_cuda(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_cuda());
@@ -1068,7 +1068,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_xpu(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_xpu());
@@ -1076,7 +1076,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::data(Tensor(a) a) -> Tensor(a)"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, autograd::Variable(a).variable_data());
@@ -1113,7 +1113,7 @@
#define DEFINE_STRING_IS_OP(op_name, char_op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#op_name "(str self) -> bool"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
auto string = pop(stack).toStringRef(); \
push( \
stack, \
@@ -1134,7 +1134,7 @@
#define DEFINE_STRING_CHAR_MAP_OP(op_name, char_op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#op_name "(str self) -> str"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
auto string = pop(stack).toStringRef(); \
std::stringstream ss; \
for (char c : string) { \
@@ -1183,7 +1183,7 @@
// operator below is intended to be as close to the Python
// implementation in torch/csrc/utils/tensor_list.cpp as possible.
[](const Node* /*node*/) -> Operation {
- return [](Stack* stack) {
+ return [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int elem_ty_val;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@@ -1260,19 +1260,19 @@
return v;
})());
-void dictSetItem(Stack* stack) {
+void dictSetItem(Stack& stack) {
auto value = pop(stack);
auto idx = pop(stack);
auto dict = pop(stack).toGenericDict();
dict.insert_or_assign(std::move(idx), std::move(value));
}
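For reference, the stack calling convention for these dict ops: arguments are pushed left to right and pop off in reverse. A minimal driver sketch for dictSetItem (values and key are illustrative; assumes ATen is initialized):

Stack stack;
auto dict = c10::impl::GenericDict(StringType::get(), TensorType::get());
stack.emplace_back(dict);              // self
stack.emplace_back(std::string("w"));  // idx (key)
stack.emplace_back(at::ones({2, 2}));  // value
dictSetItem(stack); // pops value, idx, dict; inserts "w" -> value into dict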
-void dictLen(Stack* stack) {
+void dictLen(Stack& stack) {
auto dict = pop(stack).toGenericDict();
push(stack, int64_t(dict.size()));
}
-void dictValues(Stack* stack) {
+void dictValues(Stack& stack) {
auto dict = pop(stack).toGenericDict();
auto values = c10::impl::GenericList(dict.valueType());
for (const auto& entry : dict) {
@@ -1281,7 +1281,7 @@
push(stack, values);
}
-void dictKeys(Stack* stack) {
+void dictKeys(Stack& stack) {
auto dict = pop(stack).toGenericDict();
auto keys = c10::impl::GenericList(dict.keyType());
for (const auto& entry : dict) {
@@ -1290,7 +1290,7 @@
push(stack, keys);
}
-void dictIndex(Stack* stack) {
+void dictIndex(Stack& stack) {
auto key = pop(stack);
auto dict = pop(stack).toGenericDict();
auto value = dict.find(key);
@@ -1301,7 +1301,7 @@
}
template <bool has_default>
-void dictGet(Stack* stack) {
+void dictGet(Stack& stack) {
IValue default_value;
if (has_default) {
default_value = pop(stack);
@@ -1318,7 +1318,7 @@
// If the key is in the dict, return it. Else set it to the default value and
// return that.
-void dictSetDefault(Stack* stack) {
+void dictSetDefault(Stack& stack) {
auto default_value = pop(stack);
auto key = pop(stack);
auto dict = pop(stack).toGenericDict();
@@ -1332,7 +1332,7 @@
}
template <bool has_default>
-void dictPop(Stack* stack) {
+void dictPop(Stack& stack) {
IValue default_value;
if (has_default) {
default_value = pop(stack);
@@ -1355,13 +1355,13 @@
}
}
-void dictDelete(Stack* stack) {
+void dictDelete(Stack& stack) {
dictPop<false>(stack);
// pop pushes an item on the stack but delete does not, so get rid of it
pop(stack);
}
-void dictPopItem(Stack* stack) {
+void dictPopItem(Stack& stack) {
auto dict = pop(stack).toGenericDict();
if (dict.size() == 0) {
AT_ERROR("popitem(): dictionary is empty");
@@ -1376,18 +1376,18 @@
push(stack, tuple);
}
-void dictContains(Stack* stack) {
+void dictContains(Stack& stack) {
auto key = pop(stack);
auto dict = pop(stack).toGenericDict();
push(stack, dict.contains(key));
}
-void dictClear(Stack* stack) {
+void dictClear(Stack& stack) {
auto dict = pop(stack).toGenericDict();
dict.clear();
}
-void dictUpdate(Stack* stack) {
+void dictUpdate(Stack& stack) {
auto to_add = pop(stack).toGenericDict();
auto dict = pop(stack).toGenericDict();
@@ -1396,7 +1396,7 @@
}
}
-void dictItems(Stack* stack) {
+void dictItems(Stack& stack) {
auto dict = pop(stack).toGenericDict();
auto key_type = dict.keyType();
auto value_type = dict.valueType();
@@ -1409,11 +1409,11 @@
push(stack, std::move(items));
}
-void dictCopy(Stack* stack) {
+void dictCopy(Stack& stack) {
push(stack, pop(stack).toGenericDict().copy());
}
-void dictConstructFromList(Stack* stack) {
+void dictConstructFromList(Stack& stack) {
auto input_list = pop(stack);
auto list = input_list.toList();
auto tup_type = list.elementType()->expect<TupleType>();
@@ -2120,7 +2120,7 @@
static const OperatorGeneratorArgs opGenArgs1[] = {
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::rangelist(int n) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t n;
pop(stack, n);
@@ -2136,7 +2136,7 @@
// because all _to_tensor conversions have to have the same operator name
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::NumToTensor.bool(bool a) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool b;
pop(stack, b);
@@ -2145,21 +2145,21 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::device(str a) -> Device"),
- [](Stack* stack) {
+ [](Stack& stack) {
push(stack, c10::Device(pop(stack).toStringRef()));
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::percentFormat(str self, ...) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
size_t num_inputs = pop(stack).toInt();
- percentFormat(*stack, num_inputs);
+ percentFormat(stack, num_inputs);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor self;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool non_blocking;
@@ -2174,7 +2174,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::requires_grad(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.requires_grad());
@@ -2182,7 +2182,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::grad(Tensor a) -> Tensor(*)"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.grad());
@@ -2190,7 +2190,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_sparse(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_sparse());
@@ -2198,7 +2198,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_sparse_csr(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_sparse_csr());
@@ -2206,7 +2206,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_mkldnn(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_mkldnn());
@@ -2214,7 +2214,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_mlc(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_mlc());
@@ -2222,7 +2222,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_vulkan(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_vulkan());
@@ -2230,7 +2230,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_quantized(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_quantized());
@@ -2238,7 +2238,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_meta(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_meta());
@@ -2246,7 +2246,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_ort(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_ort());
@@ -2254,7 +2254,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::name(Tensor a) -> str?"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
if (a.name() == "") {
@@ -2266,7 +2266,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::index(Device self) -> int?"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto d = pop(stack).toDevice();
if (d.has_index()) {
push(stack, d.index());
@@ -2279,11 +2279,11 @@
// TODO return generator object when torchscript supports RNG
// first-class
TORCH_SELECTIVE_SCHEMA("aten::manual_seed(int seed) -> ()"),
- [](Stack* stack) { at::manual_seed(pop(stack).toInt()); },
+ [](Stack& stack) { at::manual_seed(pop(stack).toInt()); },
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::cuda(Tensor(a) self) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.cuda());
@@ -2291,12 +2291,12 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::AutogradZero() -> Tensor"),
- [](Stack* stack) { stack->emplace_back(at::Tensor()); },
+ [](Stack& stack) { stack.emplace_back(at::Tensor()); },
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"prim::ReductionSizes(int[] size, int[] red_axes, bool keepdim = False) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
bool keepdim = pop(stack).toBool();
c10::List<int64_t> axes = pop(stack).toIntList();
c10::List<int64_t> size = pop(stack).toIntList();
@@ -2324,7 +2324,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::BroadcastSizes(...) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
std::vector<int64_t> size;
size.reserve(8);
@@ -2339,7 +2339,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::warn(str message, int stacklevel=2) -> ()"),
- [](Stack* stack) {
+ [](Stack& stack) {
TORCH_CHECK(false, "warn is implemented directly in the interpreter");
},
aliasAnalysisFromSchema()),
@@ -2347,7 +2347,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"onnx::Reshape(Tensor input, Tensor shape) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor input, shape;
pop(stack, input, shape);
shape = shape.contiguous();
@@ -2358,7 +2358,7 @@
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("onnx::Shape(Tensor t) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto t = pop(stack).toTensor();
at::IntArrayRef sizes = t.sizes();
auto sizes_tensor = torch::empty(
@@ -2367,12 +2367,12 @@
for (const auto i : c10::irange(sizes.size())) {
accessor[i] = sizes[i];
}
- stack->emplace_back(sizes_tensor);
+ stack.emplace_back(sizes_tensor);
},
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::AutogradAnyNonZero(...) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
bool result = false;
for (const IValue& v : last(stack, num_inputs)) {
@@ -2395,12 +2395,12 @@
}
}
drop(stack, num_inputs);
- stack->emplace_back(result);
+ stack.emplace_back(result);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::AutogradAllZero(...) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
bool result = true;
for (const IValue& v : last(stack, num_inputs)) {
@@ -2411,12 +2411,12 @@
}
}
drop(stack, num_inputs);
- stack->emplace_back(result);
+ stack.emplace_back(result);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::AutogradAllNonZero(...) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
bool result = true;
for (const IValue& v : last(stack, num_inputs)) {
@@ -2427,31 +2427,31 @@
}
}
drop(stack, num_inputs);
- stack->emplace_back(result);
+ stack.emplace_back(result);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::AutogradAdd(Any a, Any b) -> Any"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a, b;
pop(stack, a, b);
// NOLINTNEXTLINE(bugprone-branch-clone)
if (!a.defined() && !b.defined()) {
// undef + undef == undef
- stack->emplace_back(a);
+ stack.emplace_back(a);
} else if (!a.defined()) {
- stack->emplace_back(b);
+ stack.emplace_back(b);
} else if (!b.defined()) {
- stack->emplace_back(a);
+ stack.emplace_back(a);
} else {
- stack->emplace_back(a + b);
+ stack.emplace_back(a + b);
}
},
aliasAnalysisSpecialCase()),
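The branch logic above makes undefined tensors behave as an additive identity. A standalone mirror for clarity (hypothetical helper, not from this patch):

at::Tensor autogradAdd(const at::Tensor& a, const at::Tensor& b) {
  if (!a.defined() && !b.defined()) return a; // undef + undef == undef
  if (!a.defined()) return b;                 // undefined acts as zero
  if (!b.defined()) return a;
  return a + b;
}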
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::_size_if_not_equal(int[] self_size, int[] other_size) -> int[]?"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue self_size, other_size;
pop(stack, self_size, other_size);
auto s = self_size.toIntVector();
@@ -2466,7 +2466,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::_unwrap_optional(t(a)? optional) -> t(a)"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto val = pop(stack);
TORCH_CHECK(!val.isNone(), "Unwrapping null optional");
push(stack, std::move(val));
@@ -2476,7 +2476,7 @@
RegisterOperators reg1(
createOperators(opGenArgs1, sizeof(opGenArgs1) / sizeof(opGenArgs1[0])));
-void hashValue(Stack* stack) {
+void hashValue(Stack& stack) {
auto value = pop(stack);
push(stack, value.hash());
}
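With Operation now accepting plain void(Stack&) callables, a free function like hashValue can be registered directly. A hedged sketch (the schema string is illustrative, not from this patch):

RegisterOperators reg_hash({Operator(
    "aten::hash.any(Any self) -> int", // illustrative schema
    hashValue,                         // matches void(Stack&)
    aliasAnalysisFromSchema())});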
@@ -2618,7 +2618,7 @@
#define DEFINE_CONVERT_BASE_OP(op_name, prefix, char_op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#op_name "(int i) -> str"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
auto i = pop(stack).toInt(); \
std::stringstream ss; \
if (i < 0) { \
@@ -2635,7 +2635,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::bin(int i) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto i = pop(stack).toInt();
std::stringstream ss;
if (i == 0) {
@@ -2656,7 +2656,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"prim::StringIndex(str string, int index) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto index = pop(stack).toInt();
auto string = pop(stack).toStringRef();
auto norm_index = normalizeIndex(index, string.size());
@@ -2666,7 +2666,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::chr(int i) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto i = pop(stack).toInt();
std::stringstream ss;
TORCH_CHECK(
@@ -2684,7 +2684,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::modf(float a) -> (float, float)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
pop(stack, a);
@@ -2696,7 +2696,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::frexp(float a) -> (float, int)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
pop(stack, a);
@@ -2710,7 +2710,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ldexp(float x, int i) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@@ -2810,7 +2810,7 @@
float),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::abs(Tensor x) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor x;
pop(stack, x);
push(stack, x.abs());
@@ -2833,7 +2833,7 @@
float),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::_tensor_to_list(Tensor self) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor t;
pop(stack, t);
c10::List<int64_t> elems;
@@ -2846,7 +2846,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::_list_to_tensor(int[] self) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<int64_t> l = pop(stack).toIntList();
auto t = torch::empty(
{static_cast<int64_t>(l.size())}, at::dtype(at::kInt));
@@ -2858,7 +2858,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::sum.int(int[] self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<int64_t> l = pop(stack).toIntList();
auto sum = 0;
for (const auto& elem : l) {
@@ -2869,7 +2869,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::sum.float(float[] self) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<double> l = pop(stack).toDoubleList();
auto sum = 0.0;
for (const auto& elem : l) {
@@ -2880,7 +2880,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::sum.complex(complex[] self) -> complex"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<c10::complex<double>> l = pop(stack).toComplexDoubleList();
c10::complex<double> sum = 0.0;
for (const auto i : c10::irange(l.size())) {
@@ -2891,7 +2891,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::sum.bool(bool[] self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<bool> l = pop(stack).toBoolList();
auto sum = 0;
for (const auto& elem : l) {
@@ -2904,7 +2904,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::any.str(str[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto l = pop(stack).toList();
for (const auto& elem : l) {
if (elem != "") {
@@ -2917,7 +2917,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::any.int(int[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<int64_t> l = pop(stack).toIntList();
for (const auto& elem : l) {
if (elem) {
@@ -2930,7 +2930,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::any.float(float[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<double> l = pop(stack).toDoubleList();
for (const auto& elem : l) {
if (elem) {
@@ -2943,7 +2943,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::any.bool(bool[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<bool> l = pop(stack).toBoolList();
for (const auto& elem : l) {
if (elem) {
@@ -2956,7 +2956,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::all.int(int[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<int64_t> l = pop(stack).toIntList();
for (const auto& elem : l) {
if (!elem) {
@@ -2969,7 +2969,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::all.float(float[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<double> l = pop(stack).toDoubleList();
for (const auto& elem : l) {
if (!elem) {
@@ -2982,7 +2982,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::all.bool(bool[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<bool> l = pop(stack).toBoolList();
for (const auto& elem : l) {
if (!elem) {
@@ -2995,7 +2995,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::divmod.int(int x, int y) -> (int, int)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t a, b;
lldiv_t divresult = {};
@@ -3018,7 +3018,7 @@
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::divmod.float(float x, float y) -> (float, float)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a, b;
pop(stack, a, b);
@@ -3035,7 +3035,7 @@
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::id(AnyClassType? x) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue a;
pop(stack, a);
if (a.isNone()) {
@@ -3050,7 +3050,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA("aten::divmod." #type_a "_" #type_b "(" #type_a \
" x," #type_b " y) -> (float, float)"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
type_a a; \
type_b b; \
pop(stack, a, b); \
@@ -3076,7 +3076,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_a "_" #type_b "(" #type_a \
" x," #type_b " y) -> complex"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
actual_type_a a; \
actual_type_b b; \
pop(stack, a, b); \
@@ -3090,7 +3090,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_a "_" #type_b "(" #type_a \
" x," #type_b " y) -> complex"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
actual_type_a a; \
actual_type_b b; \
pop(stack, a, b); \
@@ -3101,7 +3101,7 @@
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_b "_" #type_a \
"(" #type_b " x," #type_a " y) -> complex"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
actual_type_b a; \
actual_type_a b; \
pop(stack, a, b); \
diff --git a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp
index 43c278b..e43c7c0 100644
--- a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp
+++ b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp
@@ -31,7 +31,7 @@
{Operator(
prim::profile,
[](const Node* node) -> Operation {
- return [](Stack* stack) {
+ return [](Stack& stack) {
AT_ERROR(
"Must be lowered to Interpreter's PROFILE instruction"); // NOLINT
};
@@ -40,7 +40,7 @@
Operator(
prim::profile_ivalue,
[](const Node* node) -> Operation {
- return [](Stack* stack) {
+ return [](Stack& stack) {
AT_ERROR(
"Must be lowered to Interpreter's PROFILE instruction"); // NOLINT
};
@@ -50,9 +50,9 @@
prim::FusionGroup,
[](const Node* node) -> Operation {
const auto key = registerFusion(node);
- return [key](Stack* stack) {
+ return [key](Stack& stack) {
RECORD_FUNCTION("FusionGroup", std::vector<c10::IValue>());
- runFusion(key, *stack);
+ runFusion(key, stack);
};
},
aliasAnalysisSpecialCase()),
@@ -67,7 +67,7 @@
t->castRaw<TensorType>()->requiresGrad().has_value());
return *t->castRaw<TensorType>()->requiresGrad();
});
- return [rg_props](Stack* stack) {
+ return [rg_props](Stack& stack) {
auto num_inputs = rg_props.size();
// Check every input's shape against profiled (expected) shape.
for (const auto i : c10::irange(num_inputs)) {
@@ -91,14 +91,14 @@
auto outputs_used = fmap(node->outputs(), [](const Value* v) {
return v->uses().size() > 0;
});
- return [=](Stack* stack) {
+ return [=](Stack& stack) {
RECORD_FUNCTION("chunk", last(stack, 1));
at::Tensor t;
pop(stack, t);
auto result = at::chunk(t, chunks, dim);
- stack->insert(
- stack->end(),
+ stack.insert(
+ stack.end(),
std::make_move_iterator(result.begin()),
std::make_move_iterator(result.end()));
// NB: Chunk can sometimes return a smaller number of outputs.
@@ -121,7 +121,7 @@
num_results);
// We know that the output is unused, so it's ok to push
// anything on the stack.
- stack->emplace_back();
+ stack.emplace_back();
}
}
};
@@ -132,7 +132,7 @@
[](const Node* node) -> Operation {
int64_t raw_dim = node->i(attr::dim);
int64_t chunks = node->i(attr::chunks);
- return [raw_dim, chunks](Stack* stack) {
+ return [raw_dim, chunks](Stack& stack) {
c10::List<int64_t> shape = pop(stack).toIntList();
c10::List<int64_t> regular_shape = shape.copy();
c10::List<int64_t> last_shape = shape.copy();
@@ -158,7 +158,7 @@
aliasAnalysisSpecialCase()),
Operator(
"aten::_grad_sum_to_size(Tensor(a) self, int[]? size) -> Tensor(a)",
- [](Stack* stack) {
+ [](Stack& stack) {
RECORD_FUNCTION("_grad_sum_to_size", std::vector<c10::IValue>());
IValue self, size;
pop(stack, self, size);
@@ -175,7 +175,7 @@
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"prim::ModuleContainerIndex.list(Any self, int ind) -> Any"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue ind = pop(stack);
IValue module_dict = pop(stack);
std::stringstream ss;
@@ -189,7 +189,7 @@
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"prim::ModuleContainerIndex.dict(Any self, str ind) -> Any"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue ind = pop(stack);
IValue module_dict = pop(stack);
push(stack, module_dict.toModule().attr(ind.toStringRef()));
@@ -198,7 +198,7 @@
Operator(
prim::TypeCheck /* (...) -> (..., bool) */,
[](const Node* /* node */) -> Operation {
- return [](Stack* /* stack */) {
+ return [](Stack& /* stack */) {
AT_ERROR("prim::TypeCheck not yet implemented"); // NOLINT
};
},
@@ -206,7 +206,7 @@
Operator(
prim::FallbackGraph,
[](const Node* node) -> Operation {
- return [](Stack* stack) {
+ return [](Stack& stack) {
AT_ERROR(
"Must be converted to prim::FunctionCall by replaceFallbackGraphWithFallbackFunction"); // NOLINT
};
@@ -214,17 +214,17 @@
aliasAnalysisSpecialCase()),
Operator(
"prim::Guard(Tensor(a) t) -> Tensor(a)",
- [](Stack* stack) { AT_ERROR("Should be replaced by prim::BailOut"); },
+ [](Stack& stack) { AT_ERROR("Should be replaced by prim::BailOut"); },
aliasAnalysisFromSchema()),
Operator(
"prim::BailOut(...) -> Tensor(a)",
- [](Stack* /* stack */) {
+ [](Stack& /* stack */) {
AT_ERROR("prim::BailOut not yet implemented"); // NOLINT
},
aliasAnalysisFromSchema()),
Operator(
"prim::BailoutTemplate() -> int",
- [](Stack* stack) {
+ [](Stack& stack) {
// TODO: today, we put a single bailout template at the front to
// carry the un-optimized graph for bailout nodes to use. Ideally
// this should never run, but we haven't written the code to remove
@@ -237,7 +237,7 @@
aliasAnalysisFromSchema()),
Operator(
"aten::grad(Tensor[] outputs, Tensor[] inputs, Tensor?[]? grad_outputs=None, bool? retain_graph=None, bool create_graph=False, bool allow_unused=False) -> Tensor?[]",
- [](Stack* stack) {
+ [](Stack& stack) {
bool allow_unused = pop(stack).toBool();
bool create_graph = pop(stack).toBool();
auto retain_graph = pop(stack).toOptional<bool>();
@@ -277,7 +277,7 @@
// create_graph=True so we use aliasAnalysisConservative for these two OPs
Operator(
"aten::backward.TensorList(Tensor[] tensors, Tensor?[]? grad_tensors=None, bool? retain_graph=None, bool create_graph=False) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
bool create_graph = pop(stack).toBool();
auto retain_graph = pop(stack).toOptional<bool>();
auto grad_tensors = pop(stack);
@@ -298,7 +298,7 @@
aliasAnalysisConservative()),
Operator(
"aten::save(t item, str filename) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
auto filename = pop(stack).toStringRef();
auto ivalue = pop(stack);
@@ -312,7 +312,7 @@
aliasAnalysisFromSchema()),
Operator(
"prim::IgnoredPythonOp(...) -> None",
- [](Stack* stack) {
+ [](Stack& stack) {
throw JITException(
"This Python function is annotated to be ignored"
" and cannot be and has not been included in the exported"
@@ -323,7 +323,7 @@
aliasAnalysisFromSchema()),
Operator(
"aten::wait(Future(t) self) -> t",
- [](Stack* stack) {
+ [](Stack& stack) {
TORCH_CHECK(
false, "wait is implemented directly in the interpreter");
},
@@ -332,7 +332,7 @@
RegisterOperators logging_operators(
{Operator(
"prim::AddStatValue(str key, int val) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
auto val = pop(stack).toInt();
auto key = pop(stack).toString();
@@ -353,7 +353,7 @@
aliasAnalysisFromSchema()),
Operator(
"prim::TimePoint() -> int",
- [](Stack* stack) {
+ [](Stack& stack) {
auto schema = parseSchema("prim::TimePoint() -> int");
Node* node = nullptr;
// TODO: remove this custom tracing code once the custom op bugfix
@@ -372,7 +372,7 @@
},
aliasAnalysisFromSchema())});
-C10_UNUSED void hashValue(Stack* stack) {
+C10_UNUSED void hashValue(Stack& stack) {
auto value = pop(stack);
push(stack, value.hash());
}
@@ -453,7 +453,7 @@
}
template <bool has_reverse_arg, bool copy_return_list>
-void sort_op(Stack* stack) {
+void sort_op(Stack& stack) {
bool reverse = has_reverse_arg ? pop(stack).toBool() : false;
auto g_list = pop(stack).toList();
@@ -697,7 +697,7 @@
") ");
}
-void interpolate_op(Stack* stack) {
+void interpolate_op(Stack& stack) {
at::Tensor input;
IValue size;
IValue scale_factors;
@@ -743,7 +743,7 @@
return scale_factor_double;
}
-void upsample_nearest_op(Stack* stack) {
+void upsample_nearest_op(Stack& stack) {
at::Tensor input;
IValue size;
IValue scale_factor_int;
@@ -754,7 +754,7 @@
push(stack, std::move(res));
}
-void upsample_op(Stack* stack) {
+void upsample_op(Stack& stack) {
at::Tensor input;
IValue size;
IValue scale_factor_int;
@@ -772,7 +772,7 @@
push(stack, std::move(res));
}
-void upsample_bilinear_op(Stack* stack) {
+void upsample_bilinear_op(Stack& stack) {
at::Tensor input;
IValue size;
IValue scale_factor_int;
diff --git a/torch/csrc/jit/runtime/register_special_ops.cpp b/torch/csrc/jit/runtime/register_special_ops.cpp
index ace87f2..015d607 100644
--- a/torch/csrc/jit/runtime/register_special_ops.cpp
+++ b/torch/csrc/jit/runtime/register_special_ops.cpp
@@ -184,7 +184,7 @@
}
template <bool if_set_requires_grad>
-void createTensorFromList(Stack* stack) {
+void createTensorFromList(Stack& stack) {
// torch.tensor has a fourth requires_grad arg but torch.as_tensor does not, so
// we use the template arg to distinguish between these two cases
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@@ -246,7 +246,7 @@
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::split(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
RECORD_FUNCTION("split_with_sizes", last(stack, 3));
auto result = at::split_with_sizes(
@@ -264,7 +264,7 @@
"aten::tensor." #operator_type "(" #operator_type \
" t, *, ScalarType? dtype=None, Device? device=None" \
", bool requires_grad=False) -> Tensor"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c_type scalar_val; \
IValue dtype; \
IValue device; \
@@ -280,7 +280,7 @@
TORCH_SELECTIVE_SCHEMA( \
"aten::as_tensor." #operator_type "(" #operator_type \
" t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c_type scalar_val; \
IValue dtype; \
IValue device; \
@@ -319,7 +319,7 @@
// tensor_new.cpp
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA("aten::_infer_size(int[] a, int[] b) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto a = pop(stack);
auto b = pop(stack);
push(stack, at::infer_size(a.toIntVector(), b.toIntVector()));
@@ -328,7 +328,7 @@
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::_no_grad_embedding_renorm_(Tensor weight, Tensor input, float max_norm, float norm_type) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor weight;
at::Tensor input;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@@ -353,7 +353,7 @@
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::as_tensor(Tensor(a) data, *, ScalarType? dtype=None, Device? device=None) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto device = pop(stack).toOptional<c10::Device>();
auto dtype = pop(stack).toOptional<at::ScalarType>();
at::Tensor data = pop(stack).toTensor();
@@ -377,24 +377,24 @@
TORCH_SELECTIVE_SCHEMA(
"aten::_pack_sequence(Tensor output, Tensor batch_sizes, Tensor? sorted_indices, "
"Tensor? unsorted_indices) -> (Tensor, Tensor, Tensor?, Tensor?)"),
- [](Stack* stack) {},
+ [](Stack&) {},
aliasAnalysisFromSchema()),
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA("aten::_get_tracing_state() -> bool"),
- [](Stack* stack) { push(stack, false); },
+ [](Stack& stack) { push(stack, false); },
aliasAnalysisFromSchema()),
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA("aten::is_scripting() -> bool"),
- [](Stack* stack) { push(stack, true); },
+ [](Stack& stack) { push(stack, true); },
aliasAnalysisFromSchema()),
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA("aten::has_torch_function(...) -> bool"),
- [](Stack* stack) { push(stack, false); },
+ [](Stack& stack) { push(stack, false); },
aliasAnalysisFromSchema()),
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::_no_grad_uniform_(Tensor(a!) tensor, float a, float b) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// TODO: remove when script supports setting grad mode
torch::NoGradGuard no_grad;
@@ -410,7 +410,7 @@
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::_no_grad_normal_(Tensor(a!) tensor, float mean, float std) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// TODO: remove when script supports setting grad mode
torch::NoGradGuard no_grad;
@@ -426,7 +426,7 @@
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::_no_grad_fill_(Tensor(a!) tensor, float val) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// TODO: remove when script supports setting grad mode
torch::NoGradGuard no_grad;
@@ -440,7 +440,7 @@
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::_no_grad_zero_(Tensor(a!) tensor) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// TODO: remove when script supports setting grad mode
torch::NoGradGuard no_grad;
@@ -451,11 +451,11 @@
aliasAnalysisFromSchema()),
Operator(
"aten::is_grad_enabled() -> bool",
- [](Stack* stack) { push(stack, torch::GradMode::is_enabled()); },
+ [](Stack& stack) { push(stack, torch::GradMode::is_enabled()); },
aliasAnalysisConservative()),
Operator(
"aten::set_grad_enabled(bool val) -> ()",
- [](Stack* stack) { torch::GradMode::set_enabled(pop(stack).toBool()); },
+ [](Stack& stack) { torch::GradMode::set_enabled(pop(stack).toBool()); },
aliasAnalysisConservative()),
});
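For comparison, a minimal end-to-end registration under the new convention (namespace, schema, and op are illustrative, not part of this patch):

RegisterOperators my_reg({Operator(
    "myns::double_int(int a) -> int", // hypothetical custom op
    [](Stack& stack) {
      int64_t a = pop(stack).toInt();
      push(stack, a * 2);
    },
    aliasAnalysisFromSchema())});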
} // namespace
diff --git a/torch/csrc/jit/runtime/static/fusion.cpp b/torch/csrc/jit/runtime/static/fusion.cpp
index b08b59f..0b41b8e 100644
--- a/torch/csrc/jit/runtime/static/fusion.cpp
+++ b/torch/csrc/jit/runtime/static/fusion.cpp
@@ -39,7 +39,7 @@
auto g = node->g(attr::Subgraph);
auto module = std::make_shared<torch::jit::StaticModule>(g);
auto num_inputs = module->num_inputs();
- return [module, num_inputs](Stack* stack) {
+ return [module, num_inputs](Stack& stack) {
RECORD_FUNCTION("Static Runtime", std::vector<c10::IValue>());
auto inps = torch::jit::last(stack, num_inputs);
// TODO maybe avoid call to vec
@@ -48,10 +48,10 @@
if (module->num_outputs() > 1) {
for (auto& o : outputs.toTuple()->elements()) {
- push_one(*stack, std::move(o));
+ push_one(stack, std::move(o));
}
} else {
- push_one(*stack, std::move(outputs));
+ push_one(stack, std::move(outputs));
}
return 0;
};
diff --git a/torch/csrc/jit/runtime/static/impl.cpp b/torch/csrc/jit/runtime/static/impl.cpp
index ee8e903..e224478 100644
--- a/torch/csrc/jit/runtime/static/impl.cpp
+++ b/torch/csrc/jit/runtime/static/impl.cpp
@@ -1440,7 +1440,7 @@
}
DCHECK(op_);
- op_->operator()(&stack);
+ op_->operator()(stack);
DCHECK_EQ(stack.size(), node_->outputs().size());
for (const auto i : c10::irange(node_->outputs().size())) {
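Caller-side, the change is symmetric: code that previously passed the stack's address now passes the reference straight through. A minimal sketch (names are illustrative):

void runBoxed(Operation& op, Stack& stack) {
  op(stack); // was: op(&stack), or op_->operator()(&stack) as above
}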