#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || \
defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || \
defined(THC_REAL_IS_HALF)
#define RealStr "float"
#else
#define RealStr "int"
#endif
#ifdef THC_REAL_IS_HALF
#define AS_REAL(x) THC_float2half(x)
#else
#define AS_REAL(x) x
#endif
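// RealStr is a coarse name for this tensor's element category ("float" vs
// "int"). AS_REAL converts a plain literal to the tensor's element type
// (via THC_float2half for half tensors); it is used below for default
// argument values such as AS_REAL(1).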
#ifndef THC_GENERIC_FILE
#define IS_CUDA false
#define CUDA_FLOAT false
#else
#define IS_CUDA true
#define CUDA_INT defined(THC_REAL_IS_INT)
#define CUDA_LONG defined(THC_REAL_IS_LONG)
#define CUDA_FLOAT defined(THC_REAL_IS_FLOAT)
#define CUDA_DOUBLE defined(THC_REAL_IS_DOUBLE)
#define CUDA_HALF defined(THC_REAL_IS_HALF)
#endif
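// These IS_CUDA / CUDA_* flags are referenced by the defined_if conditions
// of the declarations below, so each wrapper is only emitted for the tensor
// types that actually support it.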
#if IS_CUDA
#define THPIndexTensor THCPLongTensor
#define THPIndexTensorClass THCPLongTensorClass
#else
#define THPIndexTensor THPLongTensor
#define THPIndexTensorClass THPLongTensorClass
#endif
#if IS_CUDA
#define THPBoolTensor THCPByteTensor
#define THPBoolTensorClass THCPByteTensorClass
#else
#define THPBoolTensor THPByteTensor
#define THPBoolTensorClass THPByteTensorClass
#endif
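// Blocks delimited by [[ and ]] are declarations processed by the cwrap
// code generator. Entries marked only_register: True have hand-written
// implementations (such as the functions that follow), and cwrap only adds
// them to the method table.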
[[
name: THPTensor_(writeMetadata)
python_name: _write_metadata
only_register: True
]]
PyObject * THPTensor_(writeMetadata)(THPTensor *self, PyObject *args)
{
if (!args || PyTuple_Size(args) != 1) {
THPUtils_invalidArguments(args, "_write_metadata", 1, "a single file object");
return NULL;
}
int fd = PyObject_AsFileDescriptor(PyTuple_GET_ITEM(args, 0));
if (fd == -1) {
THPUtils_setError("write_file couln't retrieve file descriptor from given object");
return NULL;
}
THPTensor_(writeMetadataRaw)(self->cdata, fd);
Py_RETURN_NONE;
}
[[
name: THPTensor_(newWithMetadataFile)
python_name: _new_with_metadata_file
only_register: True
method_flags: METH_STATIC
]]
PyObject * THPTensor_(newWithMetadataFile)(PyObject *_null, PyObject *args)
{
if (!args || PyTuple_Size(args) != 2 ||
!(THPStorage_(Check)(PyTuple_GET_ITEM(args, 1)) ||
PyTuple_GET_ITEM(args, 1) == Py_None)) {
THPUtils_invalidArguments(args, "_new_with_metadata_file", 1, "single file object and a storage object");
return NULL;
}
int fd = PyObject_AsFileDescriptor(PyTuple_GET_ITEM(args, 0));
if (fd == -1) {
THPUtils_setError("write_file couln't retrieve file descriptor from given object");
return NULL;
}
THStorage *storage = NULL;
if (PyTuple_GET_ITEM(args, 1) != Py_None) {
storage = ((THPStorage*)PyTuple_GET_ITEM(args, 1))->cdata;
}
THTensorPtr tensor = THPTensor_(newWithMetadataFileRaw)(fd, storage);
PyObject *result = THPTensor_(New)(tensor);
tensor.release();
return result;
}
[[
name: THPTensor_(toNumpy)
defined_if: defined(NUMPY_TYPE_ENUM)
python_name: numpy
only_register: True
]]
#ifdef NUMPY_TYPE_ENUM
// Adapted from fblualib
PyObject * THPTensor_(toNumpy)(THPTensor *self, PyObject *args) {
npy_intp zero = 0;
int ndim;
npy_intp* sizes_ptr;
std::unique_ptr<npy_intp[]> sizes;
std::unique_ptr<npy_intp[]> strides;
// Numpy and Torch disagree on empty tensors. In Torch, an empty tensor
// is a tensor with zero dimensions. In Numpy, a tensor with zero dimensions
// is a scalar (with one element). So we'll convert an empty Torch tensor
// to a 1d Numpy tensor of shape [0]. Also see pushTensor in PythonToLua.cpp.
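// For example, an empty Torch tensor (nDimension() == 0) comes out as a
// one-dimensional array of shape [0], i.e. with zero elements.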
ndim = THTensor_(nDimension)(LIBRARY_STATE self->cdata);
if (ndim != 0) {
sizes.reset(new npy_intp[ndim]);
std::copy(self->cdata->size, self->cdata->size + ndim, sizes.get());
sizes_ptr = sizes.get();
if (!THTensor_(isContiguous)(LIBRARY_STATE self->cdata)) {
strides.reset(new npy_intp[ndim]);
// Numpy strides use bytes; Torch strides use element counts.
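// e.g. a transposed 3x2 view of a contiguous 2x3 float tensor has Torch
// strides {1, 3}, which become Numpy strides {4, 12} (sizeof(float) == 4).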
for (int i = 0; i < ndim; ++i) {
strides[i] = self->cdata->stride[i] * sizeof(real);
}
}
} else {
ndim = 1;
sizes_ptr = &zero;
}
THPObjectPtr array = PyArray_New(
&PyArray_Type, ndim, sizes_ptr, NUMPY_TYPE_ENUM,
strides.get(), self->cdata->storage->data, 0,
NPY_ARRAY_ALIGNED, nullptr);
if (!array) {
THPUtils_setError("an error occured during conversion to numpy array");
return NULL;
}
// Keep a reference to this tensor as the array's base object, so the
// underlying storage stays alive for as long as the numpy array does.
// PyArray_SetBaseObject steals the reference to the base object.
Py_INCREF(self);
if (PyArray_SetBaseObject((PyArrayObject*)(array.get()), (PyObject*)self) == -1) {
Py_DECREF(self);
THPUtils_setError("an error occured during conversion to numpy array");
return NULL;
}
return array.release();
}
THTensor* THPTensor_(fromNumpy)(PyObject *numpy_array) {
PyArrayObject *array = (PyArrayObject*)numpy_array;
THStoragePtr storage = THStorage_(newWithDataAndAllocator)(
(real*)PyArray_DATA(array),
PyArray_NBYTES(array) / sizeof(real),
&THNumpyArrayAllocator,
new NumpyArrayAllocator(numpy_array));
// Numpy and Torch disagree on empty tensors. In Torch, an empty
// tensor is a tensor with zero dimensions. In Numpy, an empty tensor
// keeps its shape, but has 0 as the size of one of the dimensions.
// So we'll convert all Numpy tensors of 0 elements to empty Torch tensors.
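// For example, np.zeros((0, 3)) has zero elements, so it becomes a Torch
// tensor with no dimensions (its shape information is dropped).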
if (PyArray_SIZE(array) != 0) {
auto ndim = PyArray_NDIM(array);
THLongStoragePtr sizes = THLongStorage_newWithSize(ndim);
long *sizes_data = sizes->data;
for (int i = 0; i < ndim; ++i) {
sizes_data[i] = PyArray_DIM(array, i);
}
THLongStoragePtr strides = THLongStorage_newWithSize(ndim);
long *strides_data = strides->data;
for (int i = 0; i < ndim; ++i) {
strides_data[i] = PyArray_STRIDE(array, i) / sizeof(real); // numpy uses bytes, torch uses elements
}
THTensor *result = THTensor_(newWithStorage)(storage, 0, sizes, strides);
return result;
} else {
THTensor *result = THTensor_(newWithStorage)(storage, 0, NULL, NULL);
return result;
}
}
#endif
[[
name: getDevice
python_name: get_device
defined_if: IS_CUDA
return: long
arguments:
- THTensor* self
]]
// TODO: check that there are no args
[[
name: THPTensor_(elementSize)
python_name: element_size
only_register: True
]]
static PyObject * THPTensor_(elementSize)(THPTensor *self, PyObject *args)
{
return PyLong_FromLong(THStorage_(elementSize)(LIBRARY_STATE_NOARGS));
}
// TODO: check that there are no args
[[
name: THPTensor_(storage)
python_name: storage
only_register: True
]]
static PyObject * THPTensor_(storage)(THPTensor *self, PyObject *args)
{
// TODO: memory leak on error
THStorage *result = THTensor_(storage)(LIBRARY_STATE self->cdata);
if (result == NULL)
Py_RETURN_NONE;
THStorage_(retain)(LIBRARY_STATE result);
THStoragePtr _tmp = result;
PyObject *ret = THPStorage_(New)(result);
_tmp.release();
return ret;
}
[[
name: storageOffset
python_name: storage_offset
return: long
arguments:
- THTensor* self
]]
[[
name: nDimension
python_name: ndimension
return: long
arguments:
- THTensor* self
]]
[[
name: THPTensor_(nDimension)
python_name: dim
only_register: True
]]
[[
name: free
return: self
arguments:
- THTensor* self
]]
[[
name: retain
return: self
arguments:
- THTensor* self
]]
[[
name: resize_
cname: resize
return: self
long_args: True
arguments:
- THTensor* self
- THLongStorage* long_args
- CONSTANT NULL
]]
[[
name: zeros
only_stateless: True
return: argument 0
long_args: True
arguments:
- arg: THTensor* result
allocate: True
- THLongStorage* long_args
]]
[[
name: zeros_
cname: zeros
return: self
long_args: True
arguments:
- THTensor* self
- THLongStorage* long_args
]]
[[
name: ones
only_stateless: True
return: argument 0
long_args: True
arguments:
- arg: THTensor* result
allocate: True
- THLongStorage* long_args
]]
[[
name: ones_
cname: ones
return: self
long_args: True
arguments:
- THTensor* self
- THLongStorage* long_args
]]
[[
name: numel
return: long
with_stateless: True
arguments:
- THTensor* self
]]
[[
name: THPTensor_(numel)
python_name: nelement
only_register: True
]]
[[
name: set_
cname: set
return: argument 0
options:
- cname: set
arguments:
- THTensor* self
- THTensor* source
- cname: setStorage
arguments:
- THTensor* self
- CONSTANT NULL, 0, NULL, NULL
- cname: setStorage
before_call: THLongStoragePtr __storage_size = THLongStorage_newWithSize1(THStorage_(size)(LIBRARY_STATE ((THPStorage*)$arg1)->cdata));
arguments:
- THTensor* self
- THStorage* storage
- CONSTANT 0
- CONSTANT __storage_size.get()
- CONSTANT NULL
- cname: setStorage
arguments:
- THTensor* self
- THStorage* sourceStorage
- long storage_offset
- THLongStorage* sizes
- THLongStorage* strides
- cname: setStorage
long_args: True
arguments:
- THTensor* self
- THStorage* sourceStorage
- long storage_offset
- THLongStorage* long_args
- CONSTANT NULL
]]
[[
name: THPTensor_(select)
python_name: select
only_register: True
]]
static PyObject * THPTensor_(select)(THPTensor *self, PyObject *args)
{
HANDLE_TH_ERRORS
long dim, idx;
if (!PyArg_ParseTuple(args, "ll", &dim, &idx))
return NULL;
int ndim = THTensor_(nDimension)(LIBRARY_STATE self->cdata);
if(ndim > 1) {
THTensor *selected = THTensor_(newWithTensor)(LIBRARY_STATE self->cdata);
THTensor_(select)(LIBRARY_STATE selected, NULL, dim, idx);
return THPTensor_(New)(selected);
}
else {
THArgCheck(ndim == 1, 1, "empty Tensor");
return THPUtils_(newReal)(THTensor_(get1d)(LIBRARY_STATE self->cdata, idx));
}
END_HANDLE_TH_ERRORS
}
#if defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT)
#define BUILD_REAL_FMT "d"
#else
#define BUILD_REAL_FMT "L"
#endif
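// BUILD_REAL_FMT is the PyObject_CallFunction format code used to pass a
// single element to the Python callback: "d" builds a Python float from a
// C double, "L" builds a Python int from a C long long.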
#if !IS_CUDA
[[
name: THPTensor_(apply)
python_name: apply_
defined_if: "!IS_CUDA"
only_register: True
]]
static PyObject * THPTensor_(apply)(THPTensor *self, PyObject *arg)
{
HANDLE_TH_ERRORS
if (!PyCallable_Check(arg)) {
THPUtils_setError("apply requires a callable as it's first argument");
return NULL;
}
THTensor *tensor = self->cdata;
TH_TENSOR_APPLY(real, tensor,
PyObject *ret =
PyObject_CallFunction(arg, (char*)BUILD_REAL_FMT, *tensor_data);
if (!ret)
return NULL;
if (!THPUtils_(checkReal)(ret)) {
Py_DECREF(ret);
THError("given function should return a number");
}
*tensor_data = THPUtils_(unpackReal)(ret);
Py_DECREF(ret);
);
Py_INCREF(self);
return (PyObject*)self;
END_HANDLE_TH_ERRORS
}
[[
name: THPTensor_(map)
python_name: map_
defined_if: "!IS_CUDA"
only_register: True
]]
static PyObject * THPTensor_(map)(THPTensor *self, PyObject *args)
{
HANDLE_TH_ERRORS
PyObject *fn;
THPTensor *src_object;
if (!PyArg_ParseTuple(args, "O!O&", THPTensorClass, &src_object, THPUtils_getCallable, &fn))
return NULL;
THTensor *tensor = self->cdata;
THTensor *src = src_object->cdata;
TH_TENSOR_APPLY2(real, tensor, real, src,
PyObject *ret =
PyObject_CallFunction(fn, (char*)(BUILD_REAL_FMT BUILD_REAL_FMT),
*tensor_data, *src_data);
if (!ret)
return NULL;
if (!THPUtils_(checkReal)(ret)) {
Py_DECREF(ret);
THError("given function should return a number");
}
*tensor_data = THPUtils_(unpackReal)(ret);
Py_DECREF(ret);
);
Py_INCREF(self);
return (PyObject*)self;
END_HANDLE_TH_ERRORS
}
[[
name: THPTensor_(map2)
python_name: map2_
defined_if: "!IS_CUDA"
only_register: True
]]
static PyObject * THPTensor_(map2)(THPTensor *self, PyObject *args)
{
HANDLE_TH_ERRORS
PyObject *fn;
THPTensor *src1_object;
THPTensor *src2_object;
if (!PyArg_ParseTuple(args, "O!O!O&", THPTensorClass, &src1_object, THPTensorClass, &src2_object, THPUtils_getCallable, &fn))
return NULL;
THTensor *tensor = self->cdata;
THTensor *src1 = src1_object->cdata;
THTensor *src2 = src2_object->cdata;
TH_TENSOR_APPLY3(real, tensor, real, src1, real, src2,
PyObject *ret =
PyObject_CallFunction(fn, (char*)(BUILD_REAL_FMT BUILD_REAL_FMT BUILD_REAL_FMT),
*tensor_data, *src1_data, *src2_data);
if (!ret)
return NULL;
if (!THPUtils_(checkReal)(ret)) {
Py_DECREF(ret);
THError("given function should return a number");
}
*tensor_data = THPUtils_(unpackReal)(ret);
Py_DECREF(ret);
);
Py_INCREF(self);
return (PyObject*)self;
END_HANDLE_TH_ERRORS
}
#endif /* !IS_CUDA */
#undef BUILD_REAL_FMT
[[
name: abs
return: argument 0
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_LONG) || defined(TH_REAL_IS_INT) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE || CUDA_INT || CUDA_LONG
with_stateless: True
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
]]
[[
name: abs_
cname: abs
return: self
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_LONG) || defined(TH_REAL_IS_INT) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE || CUDA_INT || CUDA_LONG
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: sigmoid_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: sigmoid
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: sigmoid
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: sigmoid
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: log_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: log
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: log
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: log1p_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: log1p
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: log1p
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: exp_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: exp
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: exp
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: cos_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: cos
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: cos
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: acos_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: acos
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: acos
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: cosh_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: cosh
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: cosh
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: sin_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: sin
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: sin
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: asin_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: asin
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: asin
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: sinh_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: sinh
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: sinh
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: tan_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: tan
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: tan
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: atan_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: atan
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: atan
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: tanh_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: tanh
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: tanh
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: sqrt_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: sqrt
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: sqrt
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: rsqrt_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: rsqrt
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: rsqrt
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: ceil_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: ceil
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: ceil
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: floor_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: floor
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: floor
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: round_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: round
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: round
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: trunc_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: trunc
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: trunc
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: frac_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
cname: frac
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: frac
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: mean
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
with_stateless: True
options:
- cname: meanall
return: accreal
arguments:
- THTensor* self
- cname: mean
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- long dim
]]
[[
name: var
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
with_stateless: True
options:
- cname: varall
return: accreal
arguments:
- THTensor* self
- cname: var
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- long dim
- CONSTANT false
]]
[[
name: std
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
with_stateless: True
options:
- cname: stdall
return: accreal
arguments:
- THTensor* self
- cname: std
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- long dim
- CONSTANT false
]]
[[
name: norm
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
with_stateless: True
options:
- cname: normall
return: accreal
arguments:
- THTensor* self
- arg: real p
default: 2
- cname: norm
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- real p
- long dim
]]
[[
name: renorm
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
with_stateless: True
options:
- cname: renorm
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- real p
- long dim
- real maxnorm
]]
[[
name: renorm_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
options:
- cname: renorm
return: self
arguments:
- THTensor* self
- THTensor* self
- real p
- long dim
- real maxnorm
]]
[[
name: dist
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
with_stateless: True
options:
- cname: dist
return: accreal
arguments:
- THTensor* self
- THTensor* other
- arg: real p
default: 2
]]
[[
name: cinv
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
options:
- cname: cinv
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
]]
[[
name: cinv_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
options:
- cname: cinv
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: neg
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
with_stateless: True
options:
- cname: neg
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
]]
[[
name: neg_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT || CUDA_HALF || CUDA_DOUBLE
options:
- cname: neg
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: atan2
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
with_stateless: True
cname: atan2
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: atan2_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
cname: atan2
return: argument 0
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: data_ptr
return: void*
cname: data
arguments:
- THTensor* self
]]
// When used as a stateful method these options look identical, so only the
// first one is available. They still differ in the stateless torch.pow.
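// Roughly: torch.pow(tensor, 2) uses pow, torch.pow(tensor, other) uses
// cpow, and torch.pow(2, tensor) uses tpow.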
[[
name: pow
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
with_stateless: True
return: argument 0
options:
- cname: pow
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- real exponent
- cname: cpow
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- THTensor* exponent
- cname: tpow
arguments:
- arg: THTensor* destination
allocate: True
- real base
- THTensor* self
]]
[[
name: pow_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
return: argument 0
cname: pow
options:
- cname: pow
arguments:
- THTensor* self
- THTensor* self
- real exponent
- cname: cpow
arguments:
- THTensor* self
- THTensor* self
- THTensor* exponent
]]
[[
name: lerp
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
with_stateless: True
return: argument 0
cname: lerp
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- THTensor* end
- real weight
]]
[[
name: lerp_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || CUDA_FLOAT
return: self
cname: lerp
arguments:
- THTensor* self
- THTensor* self
- THTensor* end
- real weight
]]
[[
name: linspace
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
only_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- real start
- real end
- arg: long steps
default: 100
]]
[[
name: logspace
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
only_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- real start
- real end
- arg: long steps
default: 100
]]
[[
name: histc
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
with_stateless: True
return: argument 0
options:
- arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- CONSTANT 100
- CONSTANT 0
- CONSTANT 0
- arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- long bins
- CONSTANT 0
- CONSTANT 0
- arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- long bins
- real min
- CONSTANT 0
- arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- long bins
- real min
- real max
]]
[[
name: zero_
cname: zero
return: self
arguments:
- THTensor* self
]]
[[
name: size
options:
- return: long
cname: size
arguments:
- THTensor* self
- long dim
- return: THLongStorage*
cname: newSizeOf
arguments:
- THTensor* self
]]
[[
name: stride
options:
- return: long
cname: stride
arguments:
- THTensor* self
- long dim
- return: THLongStorage*
cname: newStrideOf
arguments:
- THTensor* self
]]
[[
name: fill_
cname: fill
return: self
arguments:
- THTensor* self
- real value
]]
[[
name: isSameSizeAs
python_name: is_same_size
return: bool
arguments:
- THTensor* self
- THTensor* other
]]
[[
name: isContiguous
python_name: is_contiguous
return: bool
arguments:
- THTensor* self
]]
[[
name: isSetTo
python_name: is_set_to
return: bool
arguments:
- THTensor* self
- THTensor* tensor
]]
[[
name: isSize
python_name: is_size
return: bool
arguments:
- THTensor* self
- THLongStorage* size
]]
[[
name: cmax
defined_if: CUDA_FLOAT || !IS_CUDA
return: argument 0
with_stateless: True
options:
- cname: cmax
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- THTensor* other
- cname: cmaxValue
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- real value
]]
[[
name: cmax_
defined_if: CUDA_FLOAT || !IS_CUDA
return: self
options:
- cname: cmax
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
- cname: cmaxValue
arguments:
- THTensor* self
- THTensor* self
- real value
]]
[[
name: cmin
defined_if: CUDA_FLOAT || !IS_CUDA
return: argument 0
with_stateless: True
options:
- cname: cmin
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- THTensor* other
- cname: cminValue
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- real value
]]
[[
name: cmin_
defined_if: CUDA_FLOAT || !IS_CUDA
return: self
options:
- cname: cmin
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
- cname: cminValue
arguments:
- THTensor* self
- THTensor* self
- real value
]]
[[
name: sum
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
options:
- cname: sumall
return: accreal
arguments:
- THTensor* self
- cname: sum
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- long dim
]]
[[
name: prod
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
options:
- cname: prodall
return: accreal
arguments:
- THTensor* self
- cname: prod
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- long dim
]]
[[
name: cumsum
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- long dim
]]
[[
name: cumprod
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- long dim
]]
[[
name: sign
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
]]
[[
name: sign_
cname: sign
return: self
arguments:
- THTensor* self
- THTensor* self
]]
[[
name: trace
with_stateless: True
defined_if: "!IS_CUDA"
return: accreal
arguments:
- THTensor* self
]]
[[
name: add
with_stateless: True
return: argument 0
options:
- cname: add
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- real value
- cname: cadd
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- arg: real value
default: AS_REAL(1)
- THTensor* other
]]
[[
name: add_
return: argument 0
options:
- cname: add
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: cadd
arguments:
- THTensor* self
- THTensor* self
- arg: real value
default: AS_REAL(1)
- THTensor* other
]]
[[
name: sub
with_stateless: True
return: argument 0
options:
- cname: sub
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- real value
- cname: csub
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- arg: real value
default: AS_REAL(1)
- THTensor* other
]]
[[
name: sub_
return: argument 0
options:
- cname: sub
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: csub
arguments:
- THTensor* self
- THTensor* self
- arg: real value
default: AS_REAL(1)
- THTensor* other
]]
[[
name: mul
with_stateless: True
return: argument 0
options:
- cname: mul
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- real value
- cname: cmul
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: mul_
return: argument 0
options:
- cname: mul
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: cmul
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: div
with_stateless: True
return: argument 0
options:
- cname: div
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- real value
- cname: cdiv
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: div_
return: argument 0
options:
- cname: div
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: cdiv
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: fmod
defined_if: "!IS_CUDA"
return: argument 0
with_stateless: True
options:
- cname: fmod
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- real value
- cname: cfmod
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: fmod_
defined_if: "!IS_CUDA"
return: argument 0
options:
- cname: fmod
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: cfmod
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: remainder
defined_if: "!IS_CUDA"
return: argument 0
with_stateless: True
options:
- cname: remainder
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- real value
- cname: cremainder
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: remainder_
defined_if: "!IS_CUDA"
return: argument 0
options:
- cname: remainder
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: cremainder
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: clamp
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- real min
- real max
]]
[[
name: clamp_
defined_if: CUDA_FLOAT || !IS_CUDA
cname: clamp
return: self
arguments:
- THTensor* self
- THTensor* self
- real min
- real max
]]
[[
name: dot
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
return: accreal
arguments:
- THTensor* self
- THTensor* tensor
]]
[[
name: tril
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- arg: long k
default: 0
]]
[[
name: tril_
cname: tril
defined_if: CUDA_FLOAT || !IS_CUDA
return: self
arguments:
- THTensor* self
- THTensor* self
- arg: long k
default: 0
]]
[[
name: triu
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- arg: long k
default: 0
]]
[[
name: triu_
cname: triu
defined_if: CUDA_FLOAT || !IS_CUDA
return: self
arguments:
- THTensor* self
- THTensor* self
- arg: long k
default: 0
]]
[[
name: cross
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* destination
allocate: True
- THTensor* self
- THTensor* other
- arg: long dim
default: -1
]]
[[
name: eye
defined_if: "!IS_CUDA"
only_stateless: True
return: argument 0
options:
- arguments:
- arg: THTensor* result
allocate: True
- long n
- argument 1
- arguments:
- arg: THTensor* result
allocate: True
- long n
- long m
]]
[[
name: equal
defined_if: "!IS_CUDA"
with_stateless: True
return: bool
arguments:
- THTensor* self
- THTensor* other
]]
[[
name: diag
defined_if: "!IS_CUDA"
with_stateless: True
return: argument 0
options:
- arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- arg: long diagonal
default: 0
]]
[[
name: lt
return: argument 0
options:
- cname: ltValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- real value
- cname: ltTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: lt_
return: self
options:
- cname: ltValueT
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: ltTensorT
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: lt
only_stateless: True
return: argument 0
options:
- cname: ltValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- real value
- cname: ltTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- THTensor* other
- cname: ltValueT
arguments:
- THTensor* result
- THTensor* tensor
- real value
- cname: ltTensorT
arguments:
- THTensor* result
- THTensor* tensor
- THTensor* other
]]
[[
name: gt
return: argument 0
options:
- cname: gtValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- real value
- cname: gtTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: gt_
return: self
options:
- cname: gtValueT
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: gtTensorT
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: gt
only_stateless: True
return: argument 0
options:
- cname: gtValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- real value
- cname: gtTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- THTensor* other
- cname: gtValueT
arguments:
- THTensor* result
- THTensor* tensor
- real value
- cname: gtTensorT
arguments:
- THTensor* result
- THTensor* tensor
- THTensor* other
]]
[[
name: le
return: argument 0
options:
- cname: leValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- real value
- cname: leTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: le_
return: self
options:
- cname: leValueT
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: leTensorT
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: le
only_stateless: True
return: argument 0
options:
- cname: leValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- real value
- cname: leTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- THTensor* other
- cname: leValueT
arguments:
- THTensor* result
- THTensor* tensor
- real value
- cname: leTensorT
arguments:
- THTensor* result
- THTensor* tensor
- THTensor* other
]]
[[
name: ge
return: argument 0
options:
- cname: geValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- real value
- cname: geTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: ge_
return: self
options:
- cname: geValueT
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: geTensorT
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: ge
only_stateless: True
return: argument 0
options:
- cname: geValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- real value
- cname: geTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- THTensor* other
- cname: geValueT
arguments:
- THTensor* result
- THTensor* tensor
- real value
- cname: geTensorT
arguments:
- THTensor* result
- THTensor* tensor
- THTensor* other
]]
[[
name: eq
return: argument 0
options:
- cname: eqValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- real value
- cname: eqTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: eq_
return: self
options:
- cname: eqValueT
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: eqTensorT
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: eq
only_stateless: True
return: argument 0
options:
- cname: eqValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- real value
- cname: eqTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- THTensor* other
- cname: eqValueT
arguments:
- THTensor* result
- THTensor* tensor
- real value
- cname: eqTensorT
arguments:
- THTensor* result
- THTensor* tensor
- THTensor* other
]]
[[
name: ne
return: argument 0
options:
- cname: neValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- real value
- cname: neTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* self
- THTensor* other
]]
[[
name: ne_
return: self
options:
- cname: neValueT
arguments:
- THTensor* self
- THTensor* self
- real value
- cname: neTensorT
arguments:
- THTensor* self
- THTensor* self
- THTensor* other
]]
[[
name: ne
only_stateless: True
return: argument 0
options:
- cname: neValue
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- real value
- cname: neTensor
arguments:
- arg: THBoolTensor* result
allocate: True
- THTensor* tensor
- THTensor* other
- cname: neValueT
arguments:
- THTensor* result
- THTensor* tensor
- real value
- cname: neTensorT
arguments:
- THTensor* result
- THTensor* tensor
- THTensor* other
]]
[[
name: min
with_stateless: True
options:
- cname: minall
return: real
arguments:
- THTensor* self
- cname: min
return: argument 0,1
arguments:
- arg: THTensor* min
allocate: True
- arg: THIndexTensor* min_indices
allocate: True
- THTensor* self
- long dim
]]
[[
name: max
with_stateless: True
options:
- cname: maxall
return: real
arguments:
- THTensor* self
- cname: max
return: argument 0,1
arguments:
- arg: THTensor* max
allocate: True
- arg: THIndexTensor* max_indices
allocate: True
- THTensor* self
- long dim
]]
[[
name: kthvalue
defined_if: "!IS_CUDA"
with_stateless: True
return: argument 0,1
options:
- before_call: long __last_dim = THTensor_(nDimension)(LIBRARY_STATE ((THPTensor*)$arg2)->cdata)-1;
arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- long k
- CONSTANT __last_dim
- arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- long k
- long dim
]]
[[
name: mode
defined_if: "!IS_CUDA"
with_stateless: True
return: argument 0,1
options:
- before_call: long __last_dim = THTensor_(nDimension)(LIBRARY_STATE ((THPTensor*)$arg2)->cdata)-1;
arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- CONSTANT __last_dim
- arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- long dim
]]
[[
name: median
defined_if: "!IS_CUDA"
with_stateless: True
return: argument 0,1
options:
- before_call: long __last_dim = THTensor_(nDimension)(LIBRARY_STATE ((THPTensor*)$arg2)->cdata)-1;
arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- CONSTANT __last_dim
- arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- long dim
]]
[[
name: sort
with_stateless: True
return: argument 0,1
options:
- before_call: long __last_dim = THTensor_(nDimension)(LIBRARY_STATE ((THPTensor*)$arg2)->cdata)-1;
arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- CONSTANT __last_dim
- CONSTANT false
- arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- long dim
- CONSTANT false
- arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- long dim
- bool descending
]]
[[
name: topk
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
return: argument 0,1
options:
- before_call: long __last_dim = THTensor_(nDimension)(LIBRARY_STATE ((THPTensor*)$arg2)->cdata)-1;
arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- long k
- CONSTANT __last_dim
- CONSTANT false
- CONSTANT false
- arguments:
- arg: THTensor* values
allocate: True
- arg: THIndexTensor* indices
allocate: True
- THTensor* self
- long k
- long dim
- arg: bool smallest
default: "false"
- arg: bool sorted
default: "false"
]]
[[
name: maskedFill_
cname: maskedFill
python_name: masked_fill_
return: self
arguments:
- THTensor* self
- THBoolTensor* mask
- real value
]]
[[
name: maskedCopy_
cname: maskedCopy
python_name: masked_copy_
return: self
arguments:
- THTensor* self
- THBoolTensor* mask
- THTensor* source
]]
[[
name: maskedSelect
python_name: masked_select
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- THBoolTensor* mask
]]
[[
name: all
defined_if: defined(TH_REAL_IS_BYTE)
cname: logicalall
return: bool
arguments:
- THTensor* self
]]
[[
name: any
defined_if: defined(TH_REAL_IS_BYTE)
cname: logicalany
return: bool
arguments:
- THTensor* self
]]
[[
name: transpose
with_stateless: True
cname: newTranspose
return: THTensor*
arguments:
- THTensor* self
- long dim0
- long dim1
]]
[[
name: transpose_
cname: transpose
return: self
arguments:
- THTensor* self
- THTensor* self
- long dim0
- long dim1
]]
[[
name: t
with_stateless: True
cname: newTranspose
return: THTensor*
arguments:
- THTensor* self
- CONSTANT 0
- CONSTANT 1
]]
[[
name: t_
cname: transpose
return: self
arguments:
- THTensor* self
- THTensor* self
- CONSTANT 0
- CONSTANT 1
]]
[[
name: squeeze
with_stateless: True
return: argument 0
options:
- arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- cname: squeeze1d
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- long dim
]]
[[
name: squeeze_
return: self
options:
- cname: squeeze
arguments:
- THTensor* self
- THTensor* self
- cname: squeeze1d
arguments:
- THTensor* self
- THTensor* self
- long dim
]]
[[
name: nonzero
defined_if: "!IS_CUDA"
with_stateless: True
return: argument 0
arguments:
- arg: THIndexTensor* result
allocate: True
- THTensor* self
]]
[[
name: contiguous
cname: newContiguous
return: THTensor*
arguments:
- THTensor* self
]]
[[
name: clone
cname: newClone
return: THTensor*
arguments:
- THTensor* self
]]
[[
name: resizeAs_
python_name: resize_as_
cname: resizeAs
return: self
arguments:
- THTensor* self
- THTensor* template
]]
[[
name: indexSelect
python_name: index_select
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- long dim
- THIndexTensor* index
]]
[[
name: indexCopy_
python_name: index_copy_
cname: indexCopy
return: argument 0
arguments:
- THTensor* self
- long dim
- THIndexTensor* index
- THTensor* source
]]
[[
name: indexAdd_
python_name: index_add_
defined_if: CUDA_FLOAT || !IS_CUDA
cname: indexAdd
return: argument 0
arguments:
- THTensor* self
- long dim
- THIndexTensor* index
- THTensor* source
]]
[[
name: indexFill_
python_name: index_fill_
cname: indexFill
return: argument 0
arguments:
- THTensor* self
- long dim
- THIndexTensor* index
- real value
]]
[[
name: narrow
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- long dimension
- long start
- long length
]]
[[
name: unfold
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- long dimension
- long size
- long step
]]
[[
name: range
only_stateless: True
defined_if: "!IS_CUDA"
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- accreal xmin
- accreal xmax
- arg: accreal step
default: 1
]]
[[
name: scatter_
defined_if: "!IS_CUDA"
return: argument 0
options:
- cname: scatter
arguments:
- THTensor* self
- long dim
- THIndexTensor* index
- THTensor* src
- cname: scatterFill
arguments:
- THTensor* self
- long dim
- THIndexTensor* index
- real value
]]
[[
name: gather
defined_if: "!IS_CUDA"
with_stateless: True
return: argument 0
before_call: |
THLongStoragePtr _size = THLongTensor_newSizeOf(LIBRARY_STATE ((THPLongTensor*)$arg3)->cdata);
THTensor_(resize)(LIBRARY_STATE ((THPTensor*)$arg0)->cdata, _size, NULL);
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- long dim
- THLongTensor* index
]]
[[
name: addmm
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- arg: real beta
default: AS_REAL(1)
- THTensor* self
- arg: real alpha
default: AS_REAL(1)
- THTensor* mat1
- THTensor* mat2
]]
[[
name: addmm_
cname: addmm
return: self
arguments:
- THTensor* self
- arg: real beta
default: AS_REAL(1)
- THTensor* self
- arg: real alpha
default: AS_REAL(1)
- THTensor* mat1
- THTensor* mat2
]]
[[
name: addmv
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- arg: real beta
default: AS_REAL(1)
- THTensor* self
- arg: real alpha
default: AS_REAL(1)
- THTensor* mat
- THTensor* vec
]]
[[
name: addmv_
cname: addmv
return: self
arguments:
- THTensor* self
- arg: real beta
default: AS_REAL(1)
- THTensor* self
- arg: real alpha
default: AS_REAL(1)
- THTensor* mat
- THTensor* vec
]]
[[
name: addr
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- arg: real beta
default: AS_REAL(1)
- THTensor* self
- arg: real alpha
default: AS_REAL(1)
- THTensor* vec1
- THTensor* vec2
]]
[[
name: addr_
cname: addr
return: self
arguments:
- THTensor* self
- arg: real beta
default: AS_REAL(1)
- THTensor* self
- arg: real alpha
default: AS_REAL(1)
- THTensor* vec1
- THTensor* vec2
]]
[[
name: ger
only_stateless: True
cname: addr
return: argument 0
before_call: |
long s1 = THTensor_(size)(LIBRARY_STATE ((THPTensor*)$arg4)->cdata, 0);
long s2 = THTensor_(size)(LIBRARY_STATE ((THPTensor*)$arg5)->cdata, 0);
THTensor_(resize2d)(LIBRARY_STATE ((THPTensor*)$arg0)->cdata, s1, s2);
arguments:
- arg: THTensor* result
allocate: True
- CONSTANT AS_REAL(0)
- argument 0
- CONSTANT AS_REAL(1)
- THTensor* vec1
- THTensor* vec2
]]
[[
name: mv
only_stateless: True
cname: addmv
return: argument 0
before_call: |
long s = THTensor_(size)(LIBRARY_STATE ((THPTensor*)$arg4)->cdata, 0);
THTensor_(resize1d)(LIBRARY_STATE ((THPTensor*)$arg0)->cdata, s);
arguments:
- arg: THTensor* result
allocate: True
- CONSTANT AS_REAL(0)
- argument 0
- CONSTANT AS_REAL(1)
- THTensor* mat
- THTensor* vec
]]
[[
name: mm
only_stateless: True
cname: addmm
return: argument 0
before_call: |
long s1 = THTensor_(size)(LIBRARY_STATE ((THPTensor*)$arg4)->cdata, 0);
long s2 = THTensor_(size)(LIBRARY_STATE ((THPTensor*)$arg5)->cdata, 1);
THTensor_(resize2d)(LIBRARY_STATE ((THPTensor*)$arg0)->cdata, s1, s2);
arguments:
- arg: THTensor* result
allocate: True
- CONSTANT AS_REAL(0)
- argument 0
- CONSTANT AS_REAL(1)
- THTensor* mat1
- THTensor* mat2
]]
[[
name: bmm
only_stateless: True
cname: baddbmm
return: argument 0
before_call: |
long s1 = THTensor_(size)(LIBRARY_STATE ((THPTensor*)$arg4)->cdata, 0);
long s2 = THTensor_(size)(LIBRARY_STATE ((THPTensor*)$arg4)->cdata, 1);
long s3 = THTensor_(size)(LIBRARY_STATE ((THPTensor*)$arg5)->cdata, 2);
THTensor_(resize3d)(LIBRARY_STATE ((THPTensor*)$arg0)->cdata, s1, s2, s3);
arguments:
- arg: THTensor* result
allocate: True
- CONSTANT AS_REAL(0)
- argument 0
- CONSTANT AS_REAL(1)
- THTensor* mat1
- THTensor* mat2
]]
[[
name: addbmm
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- arg: real beta
default: AS_REAL(1)
- THTensor* self
- arg: real alpha
default: AS_REAL(1)
- THTensor* batch1
- THTensor* batch2
]]
[[
name: addbmm_
cname: addbmm
return: self
arguments:
- THTensor* self
- arg: real beta
default: AS_REAL(1)
- THTensor* self
- arg: real alpha
default: AS_REAL(1)
- THTensor* batch1
- THTensor* batch2
]]
[[
name: baddbmm
with_stateless: true
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- arg: real beta
default: AS_REAL(1)
- THTensor* self
- arg: real alpha
default: AS_REAL(1)
- THTensor* batch1
- THTensor* batch2
]]
[[
name: baddbmm_
cname: baddbmm
return: argument 0
arguments:
- THTensor* self
- arg: real beta
default: AS_REAL(1)
- THTensor* self
- arg: real alpha
default: AS_REAL(1)
- THTensor* batch1
- THTensor* batch2
]]
[[
name: addcmul
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- arg: real value
default: AS_REAL(1)
- THTensor* tensor1
- THTensor* tensor2
]]
[[
name: addcmul_
defined_if: CUDA_FLOAT || !IS_CUDA
cname: addcmul
return: argument 0
arguments:
- THTensor* self
- THTensor* self
- arg: real value
default: AS_REAL(1)
- THTensor* tensor1
- THTensor* tensor2
]]
[[
name: addcdiv
defined_if: CUDA_FLOAT || !IS_CUDA
with_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- arg: real value
default: AS_REAL(1)
- THTensor* tensor1
- THTensor* tensor2
]]
[[
name: addcdiv_
defined_if: CUDA_FLOAT || !IS_CUDA
cname: addcdiv
return: argument 0
arguments:
- THTensor* self
- THTensor* self
- arg: real value
default: AS_REAL(1)
- THTensor* tensor1
- THTensor* tensor2
]]
[[
name: randperm
defined_if: "!IS_CUDA"
only_stateless: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- long n
]]
#if !IS_CUDA
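// Helpers for random_: random2__ fills the tensor with integers drawn
// (modulo bias aside) uniformly from [a, b]; random1__ draws from [1, b].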
static void THTensor_(random2__)(THTensor *self, THGenerator *gen, long a, long b)
{
THArgCheck(b >= a, 2, "upper bound must be greater than or equal to the lower bound");
TH_TENSOR_APPLY(real, self, *self_data = ((THRandom_random(gen) % (b+1-a)) + a);)
}
static void THTensor_(random1__)(THTensor *self, THGenerator *gen, long b)
{
THArgCheck(b > 0, 1, "upper bound must be strictly positive");
TH_TENSOR_APPLY(real, self, *self_data = (THRandom_random(gen) % b + 1);)
}
#endif
[[
name: random_
defined_if: "!IS_CUDA"
return: self
options:
- cname: random
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- cname: random1__
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- long to
- cname: random2__
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- long from
- long to
]]
[[
name: multinomial
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
with_stateless: True
return: argument 0
arguments:
- arg: THLongTensor* result
allocate: True
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- THTensor* self
- long num_samples
- arg: bool replacement
default: "false"
]]
[[
name: uniform_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
cname: uniform
return: self
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- arg: real from
default: 0
- arg: real to
default: 1
]]
[[
name: normal_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
cname: normal
return: self
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- arg: real mean
default: 0
- arg: real var
default: 1
]]
[[
name: cauchy_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
cname: cauchy
return: self
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- arg: real location
default: 0
- arg: real scale
default: 1
]]
[[
name: logNormal_
cname: logNormal
python_name: log_normal_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
return: self
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- arg: real location
default: 1
- arg: real scale
default: 2
]]
[[
name: exponential_
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
cname: exponential
return: self
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- arg: real lambd
default: 1
]]
[[
name: rand
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
only_stateless: True
long_args: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- THLongStorage* long_args
]]
[[
name: randn
defined_if: defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
only_stateless: True
long_args: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- THLongStorage* long_args
]]
[[
name: multinomial
defined_if: CUDA_FLOAT
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THTensor* self
- long num_samples
- arg: bool replacement
default: "false"
]]
[[
name: uniform_
defined_if: CUDA_FLOAT
cname: uniform
return: self
arguments:
- THTensor* self
- arg: real from
default: 0
- arg: real to
default: 1
]]
[[
name: normal_
defined_if: CUDA_FLOAT
cname: normal
return: self
arguments:
- THTensor* self
- arg: real mean
default: 0
- arg: real var
default: 1
]]
[[
name: cauchy_
defined_if: CUDA_FLOAT
cname: cauchy
return: self
arguments:
- THTensor* self
- arg: real location
default: 0
- arg: real scale
default: 1
]]
[[
name: logNormal_
cname: logNormal
python_name: log_normal_
defined_if: CUDA_FLOAT
return: self
arguments:
- THTensor* self
- arg: real location
default: 1
- arg: real scale
default: 2
]]
[[
name: exponential_
defined_if: CUDA_FLOAT
cname: exponential
return: self
arguments:
- THTensor* self
- arg: real lambd
default: 1
]]
[[
name: rand
defined_if: CUDA_FLOAT
only_stateless: True
long_args: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THLongStorage* long_args
]]
[[
name: randn
defined_if: CUDA_FLOAT
only_stateless: True
long_args: True
return: argument 0
arguments:
- arg: THTensor* result
allocate: True
- THLongStorage* long_args
]]
[[
name: geometric_
defined_if: "!IS_CUDA"
cname: geometric
return: self
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- double p
]]
[[
name: bernoulli_
defined_if: "!IS_CUDA"
return: self
options:
- cname: bernoulli
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- arg: double p
default: 0.5
- cname: bernoulli_FloatTensor
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- THFloatTensor* float_p
- cname: bernoulli_DoubleTensor
arguments:
- THTensor* self
- arg: THGenerator* generator
default: THPDefaultGenerator->cdata
- THDoubleTensor* float_p
]]
[[
name: geometric_
cname: geometric
defined_if: CUDA_FLOAT
return: self
arguments:
- THTensor* self
- double p
]]
[[
name: bernoulli_
defined_if: CUDA_FLOAT
cname: bernoulli
return: self
arguments:
- THTensor* self
- arg: double p
default: 0.5
]]
[[
name: THPTensor_(cat)
python_name: cat
only_register: True
defined_if: CUDA_FLOAT || !IS_CUDA
]]
[[
name: THPTensor_stateless_(cat)
python_name: cat
only_register: True
only_stateless: True
defined_if: CUDA_FLOAT || !IS_CUDA
]]
#if !IS_CUDA || CUDA_FLOAT
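// Collects the tensors from a Python iterable, returning both the owning
// PyObject references (which keep the tensors alive) and the raw THTensor
// pointers. Throws std::invalid_argument if the argument is not iterable,
// and a plain std::exception (with a Python error already set) if any item
// is not a tensor of this type.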
static std::pair<std::vector<THPObjectPtr>, std::vector<THTensor *>>
THPTensor_(_iterableTensors)(PyObject *iterable)
{
THPObjectPtr iterator;
THPObjectPtr item;
std::vector<THPObjectPtr> items;
std::vector<THTensor *> item_tensors;
if ((iterator = PyObject_GetIter(iterable))) {
while((item = PyIter_Next(iterator))) {
if (!THPTensor_(Check)(item)) {
THPUtils_setError("expected an iterable of " THPTensorStr ", but found %s in it", Py_TYPE(item)->tp_name);
throw std::exception();
}
item_tensors.push_back(((THPTensor*)item.get())->cdata);
items.emplace_back(std::move(item));
}
} else {
throw std::invalid_argument("");
}
return std::make_pair(std::move(items), std::move(item_tensors));
}
static PyObject * THPTensor_(cat)(THPTensor *self, PyObject *args)
{
#if IS_CUDA && THCP_AUTO_GPU
THCPAutoGPU __autogpu_guard = THCPAutoGPU(args);
#endif
HANDLE_TH_ERRORS
THPTensor *tensor1;
THPTensor *tensor2;
long dimension;
Py_ssize_t _argcount = PyTuple_Size(args);
if (_argcount == 2) {
PyObject *iterable = PyTuple_GET_ITEM(args, 0);
PyObject *dim = PyTuple_GET_ITEM(args, 1);
std::vector<THPObjectPtr> items;
std::vector<THTensor *> item_tensors;
if (THPUtils_checkLong(dim)) {
dimension = THPUtils_unpackLong(dim);
try {
std::tie(items, item_tensors) = THPTensor_(_iterableTensors)(iterable);
THTensor_(catArray)(LIBRARY_STATE self->cdata, item_tensors.data(), items.size(), dimension);
Py_INCREF(self);
return (PyObject*)self;
} catch (std::invalid_argument &e) {
} catch (std::exception &e) {
return NULL;
}
}
} else if (_argcount == 3) {
if (PyArg_ParseTuple(args, "O!O!l", THPTensorClass, &tensor1, THPTensorClass, &tensor2, &dimension)) {
THTensor_(cat)(LIBRARY_STATE self->cdata, tensor1->cdata, tensor2->cdata, dimension);
Py_INCREF(self);
return (PyObject*)self;
}
}
// TODO: describe args
THPUtils_invalidArguments(args, "cat", 1, "(TODO)");
return NULL;
END_HANDLE_TH_ERRORS
}
static PyObject * THPTensor_stateless_(cat)(THPTensor *_unused, PyObject *args)
{
#if IS_CUDA && THCP_AUTO_GPU
THCPAutoGPU __autogpu_guard = THCPAutoGPU(args);
#endif
HANDLE_TH_ERRORS
THPTensor *tensor1;
THPTensor *tensor2;
long dimension;
Py_ssize_t _argcount = PyTuple_Size(args);
if (_argcount == 2) {
THTensorPtr _self = THTensor_(new)(LIBRARY_STATE_NOARGS);
THPTensorPtr self = (THPTensor*)THPTensor_(New)(_self);
if (!self)
return NULL;
_self.release();
PyObject *iterable = PyTuple_GET_ITEM(args, 0);
PyObject *dim = PyTuple_GET_ITEM(args, 1);
std::vector<THPObjectPtr> items;
std::vector<THTensor *> item_tensors;
if (THPUtils_checkLong(dim)) {
dimension = THPUtils_unpackLong(dim);
try {
std::tie(items, item_tensors) = THPTensor_(_iterableTensors)(iterable);
THTensor_(catArray)(LIBRARY_STATE self->cdata, item_tensors.data(), items.size(), dimension);
return (PyObject*)self.release();
} catch (std::invalid_argument &e) {
} catch (std::exception &e) {
return NULL;
}
}
} else if (_argcount == 3) {
if (PyArg_ParseTuple(args, "O!O!l", THPTensorClass, &tensor1, THPTensorClass, &tensor2, &dimension)) {
THTensorPtr _self = THTensor_(new)(LIBRARY_STATE_NOARGS);
THPTensorPtr self = (THPTensor*)THPTensor_(New)(_self.get());
if (!self)
return NULL;
_self.release();
THTensor_(cat)(LIBRARY_STATE self->cdata, tensor1->cdata, tensor2->cdata, dimension);
return (PyObject*)self.release();
} else {
PyErr_Clear();
THPTensor *self = (THPTensor*)PyTuple_GET_ITEM(args, 0);
PyObject *iterable = PyTuple_GET_ITEM(args, 1);
PyObject *dim = PyTuple_GET_ITEM(args, 2);
std::vector<THPObjectPtr> items;
std::vector<THTensor *> item_tensors;
if (THPUtils_checkLong(dim)) {
dimension = THPUtils_unpackLong(dim);
try {
std::tie(items, item_tensors) = THPTensor_(_iterableTensors)(iterable);
THTensor_(catArray)(LIBRARY_STATE self->cdata, item_tensors.data(), items.size(), dimension);
Py_INCREF(self);
return (PyObject*)self;
} catch (std::invalid_argument &e) {
} catch (std::exception &e) {
// FIXME TODO: that's not a good way of handling errors
fprintf(stderr, "e: %s\n", e.what());
return NULL;
}
}
}
} else if (_argcount == 4) {
THPTensor *self;
if (PyArg_ParseTuple(args, "O!O!O!l", THPTensorClass, &self, THPTensorClass, &tensor1, THPTensorClass, &tensor2, &dimension)) {
THTensor_(cat)(LIBRARY_STATE self->cdata, tensor1->cdata, tensor2->cdata, dimension);
Py_INCREF(self);
return (PyObject*)self;
}
}
// TODO: describe args
THPUtils_invalidArguments(args, "cat", 1, "(TODO)");
return NULL;
END_HANDLE_TH_ERRORS
}
#endif
// cwrap should put definitions before undefs, so let's mark this place
// PUT DEFINITIONS IN HERE PLEASE
#undef IS_CUDA
#undef CUDA_INT
#undef CUDA_LONG
#undef CUDA_FLOAT
#undef CUDA_DOUBLE
#undef CUDA_HALF
#undef THPIndexTensor
#undef THPIndexTensorClass
#undef THPBoolTensor
#undef THPBoolTensorClass
#undef RealStr
#undef AS_REAL