| #pragma once |
| |
| // ${generated_comment} |
| |
| #include <ATen/core/Tensor.h> |
| #include <ATen/TracerMode.h> |
| #include <ATen/core/grad_mode.h> |
| #include <c10/util/ArrayRef.h> |
| #include <c10/core/MemoryFormat.h> |
| #include <torch/csrc/api/include/torch/detail/TensorDataContainer.h> |
| #include <torch/csrc/autograd/variable.h> |
| |
| #ifndef AT_PER_OPERATOR_HEADERS |
| #include <ATen/Functions.h> |
| #else |
| #include <ATen/ops/from_blob.h> |
| $ops_headers |
| #endif |
| |
| #include <functional> |
| #include <initializer_list> |
| #include <utility> |
| |
| namespace torch { |
| |
| /// NOTE: Currently `torch::tensor(...)` doesn't support mixed data types |
| /// (e.g. `torch::tensor({{true, 2.0}})` doesn't work). We might be able to
| /// support it in the future by iterating over all sub-lists to find |
| /// the largest data type that can represent all of the elements, or by using |
| /// variadic templates. |
| /// |
| /// NOTE: C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / `std::vector` / |
| /// (nested) braced-init-list of floating-point types always produces a tensor of dtype |
| /// `torch::get_default_dtype()`, matching Python `torch.tensor` behavior. |
| /// |
| /// NOTE: C++ `torch::tensor` with an integer type or an `at::ArrayRef` / `std::vector` / |
| /// (nested) braced-init-list of integer types always produces a tensor of dtype `at::kLong` |
| /// (i.e. `int64_t`), matching Python `torch.tensor` behavior.
| /// |
| /// NOTE: The following dtypes are not supported by `torch::tensor` currently: |
| /// - `unsigned int` |
| /// - `unsigned long int` |
| /// - `unsigned long long int` |
| /// - `long long int` |
| inline at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const at::TensorOptions& options = {}) { |
| return autograd::make_variable( |
|     // note: we remove the requires_grad setting from the TensorOptions because
|     // it is ignored anyway (and we actually have an assertion that it isn't set,
|     // which would fail otherwise). We handle requires_grad explicitly here
|     // instead of passing it through to the kernel.
| tensor_data_container.convert_to_tensor(options.requires_grad(c10::nullopt)), |
| options.requires_grad()); |
| } |
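|
| // Example usage (an illustrative sketch, not part of the generated API): the
| // dtype rules described in the notes above, assuming the default dtype has not
| // been changed from `torch::kFloat`:
| //
| //   torch::Tensor a = torch::tensor({1.5, 2.5});        // dtype: kFloat (default dtype)
| //   torch::Tensor b = torch::tensor({1, 2, 3});         // dtype: kLong
| //   torch::Tensor c = torch::tensor({{1, 2}, {3, 4}});  // shape [2, 2], dtype: kLong
| //   torch::Tensor d = torch::tensor(
| //       {1.5, 2.5}, torch::dtype(torch::kDouble).requires_grad(true));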
| |
| /// A generic deleter function. |
| using Deleter = std::function<void(void*)>; |
| using at::MemoryFormat; |
| |
| /// Exposes the given `data` as a `Tensor` without taking ownership of the |
| /// original data. `sizes` should specify the shape of the tensor, `strides` the |
| /// stride in each dimension. The `deleter` function (a |
| /// `std::function<void(void*)>`) will be called on the `data` when the Tensor |
| /// data would normally be deallocated. The `TensorOptions` specify additional |
| /// configuration options for the returned tensor, such as what type to |
| /// interpret the `data` as. |
| inline at::Tensor from_blob( |
| void* data, |
| at::IntArrayRef sizes, |
| at::IntArrayRef strides, |
| const Deleter& deleter, |
| const at::TensorOptions& options = at::TensorOptions()) { |
| at::Tensor tensor = ([&]() { |
| at::AutoDispatchBelowAutograd guard; // TODO: remove |
| at::tracer::impl::NoTracerDispatchMode tracer_guard; |
| return at::from_blob(data, sizes, strides, deleter, options.requires_grad(c10::nullopt)); |
| })(); |
| return autograd::make_variable(tensor, options.requires_grad()); |
| } |
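|
| // A usage sketch (illustrative only, not part of the generated API): wrap an
| // externally owned buffer and observe when the tensor releases it. The `freed`
| // flag is a hypothetical stand-in for whatever cleanup the owning code needs.
| //
| //   float data[] = {1, 2, 3, 4};
| //   bool freed = false;
| //   {
| //     at::Tensor t = torch::from_blob(
| //         data, /*sizes=*/{2, 2}, /*strides=*/{2, 1},
| //         /*deleter=*/[&](void*) { freed = true; },
| //         torch::dtype(torch::kFloat));
| //   }
| //   // `freed` is now true: the deleter ran when the last reference to the
| //   // underlying storage went away. The buffer itself was never copied.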
| |
| /// Exposes the given `data` as a `Tensor` without taking ownership of the |
| /// original data. `sizes` should specify the shape of the tensor, `strides` the |
| /// stride in each dimension. The `TensorOptions` |
| /// specify additional configuration options for the returned tensor, such as |
| /// what type to interpret the `data` as. |
| inline at::Tensor from_blob( |
| void* data, |
| at::IntArrayRef sizes, |
| at::IntArrayRef strides, |
| const at::TensorOptions& options = at::TensorOptions()) { |
| at::Tensor tensor = ([&]() { |
| at::AutoDispatchBelowAutograd guard; // TODO: remove |
| at::tracer::impl::NoTracerDispatchMode tracer_guard; |
| return at::from_blob(data, sizes, strides, options.requires_grad(c10::nullopt)); |
| })(); |
| return autograd::make_variable(tensor, options.requires_grad()); |
| } |
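|
| // A sketch (illustrative only): custom strides let the tensor interpret the
| // buffer in a non-default layout, e.g. column-major (Fortran-order) data
| // exposed as a 2x3 tensor. The caller must keep `fortran_order` alive for as
| // long as the tensor is used, since no deleter is registered here.
| //
| //   float fortran_order[] = {1, 4, 2, 5, 3, 6};  // columns stored contiguously
| //   at::Tensor t = torch::from_blob(
| //       fortran_order, /*sizes=*/{2, 3}, /*strides=*/{1, 2},
| //       torch::dtype(torch::kFloat));
| //   // t is [[1, 2, 3], [4, 5, 6]]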
| |
| /// Exposes the given `data` as a `Tensor` without taking ownership of the |
| /// original data. `sizes` should specify the shape of the tensor. The `deleter` |
| /// function (a `std::function<void(void*)>`) will be called on the `data` when
| /// the Tensor data would normally be deallocated. The `TensorOptions` specify |
| /// additional configuration options for the returned tensor, such as what type |
| /// to interpret the `data` as. |
| inline at::Tensor from_blob( |
| void* data, |
| at::IntArrayRef sizes, |
| const Deleter& deleter, |
| const at::TensorOptions& options = at::TensorOptions()) { |
| at::Tensor tensor = ([&]() { |
| at::AutoDispatchBelowAutograd guard; // TODO: remove |
| at::tracer::impl::NoTracerDispatchMode tracer_guard; |
| return at::from_blob(data, sizes, deleter, options.requires_grad(c10::nullopt)); |
| })(); |
| return autograd::make_variable(tensor, options.requires_grad()); |
| } |
| |
| /// Exposes the given `data` as a `Tensor` without taking ownership of the |
| /// original data. `sizes` should specify the shape of the tensor. The |
| /// `TensorOptions` specify additional configuration options for the returned |
| /// tensor, such as what type to interpret the `data` as. |
| inline at::Tensor from_blob( |
| void* data, |
| at::IntArrayRef sizes, |
| const at::TensorOptions& options = at::TensorOptions()) { |
| at::Tensor tensor = ([&]() { |
| at::AutoDispatchBelowAutograd guard; // TODO: remove |
| at::tracer::impl::NoTracerDispatchMode tracer_guard; |
| return at::from_blob(data, sizes, options.requires_grad(c10::nullopt)); |
| })(); |
| return autograd::make_variable(tensor, options.requires_grad()); |
| } |
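|
| // A minimal sketch (illustrative only): the most common form, for contiguous
| // row-major data whose lifetime the caller manages. Clone the result if it
| // must outlive the original buffer.
| //
| //   std::vector<float> buf = {1, 2, 3, 4, 5, 6};
| //   at::Tensor view  = torch::from_blob(buf.data(), {2, 3}, torch::dtype(torch::kFloat));
| //   at::Tensor owned = view.clone();  // copies the data; safe after `buf` is destroyed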
| |
| ${function_definitions} |
| |
| } // namespace torch |