torch.Tensor

apply_

Name | Autograd | defined if
apply_ | no | All CPU Types

No Arguments

Returns : nothing

element_size

Name | Autograd | defined if
element_size | yes | All Types (CPU and CUDA)

No Arguments

Returns : nothing

index

Name | Autograd | defined if
index | no | All Types (CPU and CUDA)

No Arguments

Returns : nothing

map_

Name | Autograd | defined if
map_ | no | All CPU Types

No Arguments

Returns : nothing

map2_

Name | Autograd | defined if
map2_ | no | All CPU Types

No Arguments

Returns : nothing

dim

Name | Autograd | defined if
dim | yes | All Types (CPU and CUDA)

No Arguments

Returns : nothing

new

Name | Autograd | defined if
new | no | IS_CUDA

No Arguments

Returns : nothing

nelement

Name | Autograd | defined if
nelement | yes | All Types (CPU and CUDA)

No Arguments

Returns : nothing

select

Name | Autograd | defined if
select | yes | All Types (CPU and CUDA)

No Arguments

Returns : nothing

set_index

Name | Autograd | defined if
set_index | no | All Types (CPU and CUDA)

No Arguments

Returns : nothing

size

Name | Autograd | defined if
size | yes | All Types (CPU and CUDA)

No Arguments

Returns : nothing

storage

Name | Autograd | defined if
storage | no | All Types (CPU and CUDA)

No Arguments

Returns : nothing

stride

Name | Autograd | defined if
stride | yes | All Types (CPU and CUDA)

No Arguments

Returns : nothing

numpy

Name | Autograd | defined if
numpy | no | Byte // Short // Int // Long // Float // Double

No Arguments

Returns : nothing

cat

Name | Autograd | defined if
cat | no | All Types (CPU and CUDA)

No Arguments

Returns : nothing

abs

Name | Autograd | defined if
abs // abs_ | yes | Float // Double // Long // Int // Cuda_Float // Cuda_Half // Cuda_Double // Cuda_Int // Cuda_Long

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0
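
The paired naming above follows the convention used throughout this reference: `abs` returns its result (optionally written into the optional destination tensor), while `abs_` modifies `self` in place. A minimal usage sketch, assuming a current PyTorch-style Python API in which the optional destination surfaces as `out=`:

```python
import torch

x = torch.tensor([-1.5, 2.0, -3.0])

y = x.abs()            # out-of-place: returns a new tensor, x unchanged
x.abs_()               # in-place variant: overwrites x and returns self

out = torch.empty(3)
torch.abs(x, out=out)  # the optional destination argument, exposed as out=
```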

acos

Name | Autograd | defined if
acos // acos_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

add

Name | Autograd | defined if
add // add_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
value | real | [required]

Returns : argument 0

addbmm

Name | Autograd | defined if
addbmm // addbmm_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
beta | real | 1
self | Tensor | [required]
alpha | real | 1
batch1 | Tensor | [required]
batch2 | Tensor | [required]

Returns : argument 0

addcdiv

Name | Autograd | defined if
addcdiv // addcdiv_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
value | real | 1
tensor1 | Tensor | [required]
tensor2 | Tensor | [required]

Returns : argument 0

addcmul

Name | Autograd | defined if
addcmul // addcmul_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
value | real | 1
tensor1 | Tensor | [required]
tensor2 | Tensor | [required]

Returns : argument 0
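
`addcdiv` and `addcmul` fuse an elementwise divide or multiply with a scaled accumulation: the result is `self + value * tensor1 * tensor2` (or `tensor1 / tensor2` for `addcdiv`). A sketch assuming the current PyTorch keyword name `value`:

```python
import torch

t = torch.ones(3)
a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([4.0, 5.0, 6.0])

out = t.addcmul(a, b, value=0.5)          # t + 0.5 * a * b
assert torch.allclose(out, t + 0.5 * a * b)
```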

addmm

Name | Autograd | defined if
addmm // addmm_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
beta | real | 1
self | Tensor | [required]
alpha | real | 1
mat1 | Tensor | [required]
mat2 | Tensor | [required]

Returns : argument 0
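
`addmm` computes `beta * self + alpha * (mat1 @ mat2)`; `addbmm`, `baddbmm`, `addmv` and `addr` follow the same beta/alpha pattern for their respective products. A sketch assuming current PyTorch keyword arguments:

```python
import torch

M = torch.zeros(2, 4)
mat1 = torch.randn(2, 3)
mat2 = torch.randn(3, 4)

out = M.addmm(mat1, mat2, beta=1, alpha=1)   # beta * M + alpha * (mat1 @ mat2)
assert torch.allclose(out, M + mat1 @ mat2)
```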

addmv

Name | Autograd | defined if
addmv // addmv_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
beta | real | 1
self | Tensor | [required]
alpha | real | 1
mat | Tensor | [required]
vec | Tensor | [required]

Returns : argument 0

addr

Name | Autograd | defined if
addr // addr_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
beta | real | 1
self | Tensor | [required]
alpha | real | 1
vec1 | Tensor | [required]
vec2 | Tensor | [required]

Returns : argument 0

all

Name | Autograd | defined if
all | no | Byte // defined(THC_REAL_IS_BYTE)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : bool

any

Name | Autograd | defined if
any | no | Byte // defined(THC_REAL_IS_BYTE)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : bool

asin

Name | Autograd | defined if
asin // asin_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

atan

Name | Autograd | defined if
atan // atan_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

atan2

Name | Autograd | defined if
atan2 // atan2_ | no | Float // Double // Cuda_Float

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]
other | Tensor | [required]

Returns : argument 0

baddbmm

Name | Autograd | defined if
baddbmm // baddbmm_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
beta | real | 1
self | Tensor | [required]
alpha | real | 1
batch1 | Tensor | [required]
batch2 | Tensor | [required]

Returns : argument 0

bernoulli_

Name | Autograd | defined if
bernoulli_ | no | Cuda_Float // Cuda_Double // Cuda_Half

Arguments

Name | Type | Default
self | Tensor | [required]
p | double | 0.5

Returns : self

bmm

Name | Autograd | defined if
bmm | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
AS_REAL(0) | CONSTANT | [required]
0 | Tensor | [required]
AS_REAL(1) | CONSTANT | [required]
self | Tensor | [required]
mat2 | Tensor | [required]

Returns : argument 0

cauchy_

Name | Autograd | defined if
cauchy_ | no | Cuda_Float // Cuda_Double // Cuda_Half

Arguments

Name | Type | Default
self | Tensor | [required]
location | double | 0
scale | double | 1

Returns : self

ceil

Name | Autograd | defined if
ceil // ceil_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

cinv

Name | Autograd | defined if
cinv // cinv_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

clamp

Name | Autograd | defined if
clamp // clamp_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]
min | real | [required]
max | real | [required]

Returns : argument 0
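
`clamp` bounds every element to the interval [min, max], and `clamp_` does so in place. A sketch (the table lists min/max as positional reals; current PyTorch also accepts them as keywords):

```python
import torch

x = torch.tensor([-2.0, 0.3, 5.0])
y = x.clamp(min=-1.0, max=1.0)   # tensor([-1.0000, 0.3000, 1.0000])
x.clamp_(-1.0, 1.0)              # in-place variant
```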

clone

Name | Autograd | defined if
clone | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : Tensor

cmax

Name | Autograd | defined if
cmax // cmax_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
other | Tensor | [required]

Returns : argument 0

cmin

Name | Autograd | defined if
cmin // cmin_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
other | Tensor | [required]

Returns : argument 0

contiguous

Name | Autograd | defined if
contiguous | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : Tensor

cos

Name | Autograd | defined if
cos // cos_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

cosh

Name | Autograd | defined if
cosh // cosh_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

cross

Name | Autograd | defined if
cross | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]
other | Tensor | [required]
dim | long | -1

Returns : argument 0

cumprod

Name | Autograd | defined if
cumprod | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
dim | long | [required]

Returns : argument 0

cumsum

Name | Autograd | defined if
cumsum | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
dim | long | [required]

Returns : argument 0

data_ptr

Name | Autograd | defined if
data_ptr | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : void*

diag

Name | Autograd | defined if
diag | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
diagonal | long | 0

Returns : argument 0

dist

Name | Autograd | defined if
dist | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
self | Tensor | [required]
other | Tensor | [required]
p | real | 2

Returns : nothing

div

Name | Autograd | defined if
div // div_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
value | real | [required]

Returns : argument 0

dot

Name | Autograd | defined if
dot | yes | Cuda_Float // Cuda_Double // Cuda_Half // All CPU Types

Arguments

Name | Type | Default
self | Tensor | [required]
tensor | Tensor | [required]

Returns : accreal

eig

Name | Autograd | defined if
eig | no | Float // Double // Cuda_Float

Arguments

Name | Type | Default
res1 | Tensor | [optional]
res2 | Tensor | [optional]
self | Tensor | [required]
eigenvectors | bool | N

Returns : argument 0,1

eq

Name | Autograd | defined if
eq // eq_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | ByteTensor | [optional]
tensor | Tensor | [required]
value | real | [required]

Returns : argument 0

equal

Name | Autograd | defined if
equal | no | All CPU Types

Arguments

Name | Type | Default
self | Tensor | [required]
other | Tensor | [required]

Returns : bool

exp

Name | Autograd | defined if
exp // exp_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

exponential_

Name | Autograd | defined if
exponential_ | no | Cuda_Float // Cuda_Double // Cuda_Half

Arguments

Name | Type | Default
self | Tensor | [required]
lambd | double | 1

Returns : self

eye

Name | Autograd | defined if
eye | no | All CPU Types

Arguments

Name | Type | Default
result | Tensor | [optional]
n | long | [required]
1 | long | [required]

Returns : argument 0

fill_

Name | Autograd | defined if
fill_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
value | real | [required]

Returns : self

floor

Name | Autograd | defined if
floor // floor_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

fmod

Name | Autograd | defined if
fmod // fmod_ | yes | All CPU Types

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
value | real | [required]

Returns : argument 0

frac

Name | Autograd | defined if
frac // frac_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

free

Name | Autograd | defined if
free | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : self

gather

Name | Autograd | defined if
gather | no | All CPU Types

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
dim | long | [required]
index | LongTensor | [required]

Returns : argument 0
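
`gather` picks individual values along `dim` using an index tensor of the same dimensionality as `self`; for a 2-D tensor and `dim=1`, `out[i][j] = self[i][index[i][j]]`. A sketch against a current PyTorch build:

```python
import torch

src = torch.tensor([[1, 2, 3],
                    [4, 5, 6]])
idx = torch.tensor([[2, 0],
                    [1, 1]])
print(src.gather(1, idx))   # tensor([[3, 1], [5, 5]])
```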

ge

Name | Autograd | defined if
ge // ge_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | ByteTensor | [optional]
tensor | Tensor | [required]
value | real | [required]

Returns : argument 0

gels

Name | Autograd | defined if
gels | no | Float // Double // Cuda_Float

Arguments

Name | Type | Default
res1 | Tensor | [optional]
res2 | Tensor | [optional]
self | Tensor | [required]
A | Tensor | [required]

Returns : argument 0,1

geometric_

Name | Autograd | defined if
geometric_ | no | Cuda_Float // Cuda_Double // Cuda_Half

Arguments

Name | Type | Default
self | Tensor | [required]
p | double | [required]

Returns : self

geqrf

Name | Autograd | defined if
geqrf | no | Float // Double

Arguments

Name | Type | Default
res1 | Tensor | [optional]
res2 | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0,1

ger

Name | Autograd | defined if
ger | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
AS_REAL(0) | CONSTANT | [required]
0 | Tensor | [required]
AS_REAL(1) | CONSTANT | [required]
self | Tensor | [required]
vec2 | Tensor | [required]

Returns : argument 0

gesv

Name | Autograd | defined if
gesv | no | Float // Double // Cuda_Float

Arguments

Name | Type | Default
solution | Tensor | [optional]
lu | Tensor | [optional]
self | Tensor | [required]
A | Tensor | [required]

Returns : argument 0,1

get_device

Name | Autograd | defined if
get_device | no | IS_CUDA

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : long

gt

Name | Autograd | defined if
gt // gt_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | ByteTensor | [optional]
tensor | Tensor | [required]
value | real | [required]

Returns : argument 0

histc

Name | Autograd | defined if
histc | no | Float // Double

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]
100 | CONSTANT | [required]
0 | CONSTANT | [required]
0 | CONSTANT | [required]

Returns : argument 0

index_add_

Name | Autograd | defined if
index_add_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
dim | long | [required]
index | LongTensor | [required]
source | Tensor | [required]

Returns : argument 0

index_copy_

Name | Autograd | defined if
index_copy_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
dim | long | [required]
index | LongTensor | [required]
source | Tensor | [required]

Returns : argument 0

index_fill_

Name | Autograd | defined if
index_fill_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
dim | long | [required]
index | LongTensor | [required]
value | real | [required]

Returns : argument 0

index_select

Name | Autograd | defined if
index_select | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
dim | long | [required]
index | LongTensor | [required]

Returns : argument 0
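
`index_select` returns whole slices of `self` along `dim`, in the order given by a 1-D LongTensor of indices (contrast with `gather`, which picks individual elements). A sketch:

```python
import torch

x = torch.arange(12).reshape(3, 4)
rows = torch.tensor([2, 0])
print(x.index_select(0, rows))   # rows 2 and 0 of x, in that order
```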

inverse

Name | Autograd | defined if
inverse | no | Float // Double // Cuda_Float

Arguments

Name | Type | Default
output | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

is_contiguous

Name | Autograd | defined if
is_contiguous | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : bool

is_same_size

Name | Autograd | defined if
is_same_size | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
other | Tensor | [required]

Returns : bool

is_set_to

Name | Autograd | defined if
is_set_to | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
tensor | Tensor | [required]

Returns : bool

kthvalue

Name | Autograd | defined if
kthvalue | yes | All CPU Types

Arguments

Name | Type | Default
values | Tensor | [optional]
indices | LongTensor | [optional]
self | Tensor | [required]
k | long | [required]
__last_dim | CONSTANT | [required]

Returns : argument 0,1

le

Name | Autograd | defined if
le // le_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | ByteTensor | [optional]
tensor | Tensor | [required]
value | real | [required]

Returns : argument 0

lerp

Name | Autograd | defined if
lerp // lerp_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]
end | Tensor | [required]
weight | real | [required]

Returns : argument 0

linspace

Name | Autograd | defined if
linspace | no | Float // Double

Arguments

Name | Type | Default
result | Tensor | [optional]
start | real | [required]
end | real | [required]
steps | long | 100

Returns : argument 0

log

Name | Autograd | defined if
log // log_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

log1p

Name | Autograd | defined if
log1p // log1p_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

log_normal_

Name | Autograd | defined if
log_normal_ | no | Cuda_Float // Cuda_Double // Cuda_Half

Arguments

Name | Type | Default
self | Tensor | [required]
location | double | 1
scale | double | 2

Returns : self

logspace

Name | Autograd | defined if
logspace | no | Float // Double

Arguments

Name | Type | Default
result | Tensor | [optional]
start | real | [required]
end | real | [required]
steps | long | 100

Returns : argument 0

lt

Name | Autograd | defined if
lt // lt_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | ByteTensor | [optional]
tensor | Tensor | [required]
value | real | [required]

Returns : argument 0

masked_copy_

Name | Autograd | defined if
masked_copy_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
mask | ByteTensor | [required]
source | Tensor | [required]

Returns : self

masked_fill_

Name | Autograd | defined if
masked_fill_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
mask | ByteTensor | [required]
value | real | [required]

Returns : self

masked_select

Name | Autograd | defined if
masked_select | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
mask | ByteTensor | [required]

Returns : argument 0
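
`masked_select` returns a 1-D tensor of the elements of `self` where the mask is nonzero, while `masked_fill_` and `masked_copy_` write through such a mask in place. A sketch assuming a current PyTorch build, where boolean masks are preferred over the ByteTensor listed above:

```python
import torch

x = torch.tensor([[1., 2.], [3., 4.]])
mask = x > 2                   # boolean mask (ByteTensor in the legacy API)
print(x.masked_select(mask))   # tensor([3., 4.])
x.masked_fill_(mask, 0.0)      # in-place fill of the masked positions
```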

max

Name | Autograd | defined if
max | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : nothing

mean

Name | Autograd | defined if
mean | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : nothing

median

Name | Autograd | defined if
median | yes | All CPU Types

Arguments

Name | Type | Default
values | Tensor | [optional]
indices | LongTensor | [optional]
self | Tensor | [required]
__last_dim | CONSTANT | [required]

Returns : argument 0,1

min

Name | Autograd | defined if
min | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : nothing

mm

Name | Autograd | defined if
mm | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
AS_REAL(0) | CONSTANT | [required]
0 | Tensor | [required]
AS_REAL(1) | CONSTANT | [required]
self | Tensor | [required]
mat2 | Tensor | [required]

Returns : argument 0

mode

Name | Autograd | defined if
mode | yes | All CPU Types

Arguments

Name | Type | Default
values | Tensor | [optional]
indices | LongTensor | [optional]
self | Tensor | [required]
__last_dim | CONSTANT | [required]

Returns : argument 0,1

mul

Name | Autograd | defined if
mul // mul_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
value | real | [required]

Returns : argument 0

multinomial

Name | Autograd | defined if
multinomial | no | Cuda_Float // Cuda_Double // Cuda_Half

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
num_samples | long | [required]
replacement | bool | false

Returns : argument 0

mv

Name | Autograd | defined if
mv | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
AS_REAL(0) | CONSTANT | [required]
0 | Tensor | [required]
AS_REAL(1) | CONSTANT | [required]
self | Tensor | [required]
vec | Tensor | [required]

Returns : argument 0

ndimension

Name | Autograd | defined if
ndimension | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : long

narrow

Name | Autograd | defined if
narrow | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
dimension | long | [required]
start | long | [required]
length | long | [required]

Returns : argument 0

ne

Name | Autograd | defined if
ne // ne_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | ByteTensor | [optional]
tensor | Tensor | [required]
value | real | [required]

Returns : argument 0

neg

Name | Autograd | defined if
neg // neg_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

nonzero

Name | Autograd | defined if
nonzero | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | LongTensor | [optional]
self | Tensor | [required]

Returns : argument 0

norm

Name | Autograd | defined if
norm | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
self | Tensor | [required]
p | real | 2

Returns : nothing

normal_

Name | Autograd | defined if
normal_ | no | Cuda_Float // Cuda_Double // Cuda_Half

Arguments

Name | Type | Default
self | Tensor | [required]
mean | double | 0
var | double | 1

Returns : self

numel

Name | Autograd | defined if
numel | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : long

ones

Name | Autograd | defined if
ones // ones_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
size | THSize* | [required]

Returns : argument 0

orgqr

Name | Autograd | defined if
orgqr | no | Float // Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
input2 | Tensor | [required]

Returns : argument 0,1

ormqr

Name | Autograd | defined if
ormqr | no | Float // Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
input2 | Tensor | [required]
input3 | Tensor | [required]
left | bool | L
transpose | bool | N

Returns : argument 0,1

potrf

Name | Autograd | defined if
potrf | no | Cuda_Float

Arguments

Name | Type | Default
output | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

potri

Name | Autograd | defined if
potri | no | Cuda_Float

Arguments

Name | Type | Default
output | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

potrs

Name | Autograd | defined if
potrs | no | Cuda_Float

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
input2 | Tensor | [required]

Returns : argument 0

pow

Name | Autograd | defined if
pow // pow_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]
exponent | real | [required]

Returns : argument 0

prod

Name | Autograd | defined if
prod | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : nothing

pstrf

Name | Autograd | defined if
pstrf | no | Float // Double

Arguments

Name | Type | Default
res1 | Tensor | [optional]
res2 | THIntTensor* | [optional]
self | Tensor | [required]
upper | bool | U
tol | real | -1

Returns : argument 0,1

qr

Name | Autograd | defined if
qr | no | Float // Double // Cuda_Float

Arguments

Name | Type | Default
res1 | Tensor | [optional]
res2 | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0,1

rand

Name | Autograd | defined if
rand | no | Cuda_Float // Cuda_Double // Cuda_Half

Arguments

Name | Type | Default
result | Tensor | [optional]
size | THSize* | [required]

Returns : argument 0

randn

Name | Autograd | defined if
randn | no | Cuda_Float // Cuda_Double // Cuda_Half

Arguments

Name | Type | Default
result | Tensor | [optional]
size | THSize* | [required]

Returns : argument 0

random_

Name | Autograd | defined if
random_ | no | All CPU Types

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : self

randperm

Name | Autograd | defined if
randperm | no | All CPU Types

Arguments

Name | Type | Default
result | Tensor | [optional]
n | long | [required]

Returns : argument 0

range

Name | Autograd | defined if
range | no | All CPU Types

Arguments

Name | Type | Default
result | Tensor | [optional]
xmin | accreal | [required]
xmax | accreal | [required]
step | accreal | 1

Returns : argument 0

remainder

Name | Autograd | defined if
remainder // remainder_ | yes | All CPU Types

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
value | real | [required]

Returns : argument 0

renorm

Name | Autograd | defined if
renorm // renorm_ | no | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]
p | real | [required]
dim | long | [required]
maxnorm | real | [required]

Returns : argument 0

resize_as_

Name | Autograd | defined if
resize_as_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
template | Tensor | [required]

Returns : self

resize_

Name | Autograd | defined if
resize_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
size | THSize* | [required]
NULL | CONSTANT | [required]

Returns : self

retain

Name | Autograd | defined if
retain | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : self

round

Name | Autograd | defined if
round // round_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

rsqrt

Name | Autograd | defined if
rsqrt // rsqrt_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

scatter_

Name | Autograd | defined if
scatter_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
dim | long | [required]
index | LongTensor | [required]
src | Tensor | [required]

Returns : argument 0
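
`scatter_` is the in-place inverse of `gather`: it writes entries of `src` into `self` at the positions named by `index` along `dim`; for `dim=1`, `self[i][index[i][j]] = src[i][j]`. A sketch against a current PyTorch build:

```python
import torch

dst = torch.zeros(2, 4)
idx = torch.tensor([[0, 2], [1, 3]])
src = torch.tensor([[1., 2.], [3., 4.]])
dst.scatter_(1, idx, src)
print(dst)   # 1. and 2. land in row 0 at columns 0 and 2; 3. and 4. in row 1 at columns 1 and 3
```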

set_

Name | Autograd | defined if
set_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
source | Tensor | [required]

Returns : argument 0

sigmoid

Name | Autograd | defined if
sigmoid // sigmoid_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

sign

Name | Autograd | defined if
sign // sign_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

sin

Name | Autograd | defined if
sin // sin_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

sinh

Name | Autograd | defined if
sinh // sinh_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

sort

Name | Autograd | defined if
sort | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
values | Tensor | [optional]
indices | LongTensor | [optional]
self | Tensor | [required]
__last_dim | CONSTANT | [required]
false | CONSTANT | [required]

Returns : argument 0,1

sqrt

Name | Autograd | defined if
sqrt // sqrt_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

squeeze

Name | Autograd | defined if
squeeze // squeeze_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

std

Name | Autograd | defined if
std | no | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : nothing

storage_offset

Name | Autograd | defined if
storage_offset | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : long

sub

Name | Autograd | defined if
sub // sub_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
value | real | [required]

Returns : argument 0

sum

Name | Autograd | defined if
sum | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : nothing

svd

Name | Autograd | defined if
svd | no | Float // Double // Cuda_Float

Arguments

Name | Type | Default
res1 | Tensor | [optional]
res2 | Tensor | [optional]
res3 | Tensor | [optional]
self | Tensor | [required]
some | bool | S

Returns : argument 0,1,2

symeig

Name | Autograd | defined if
symeig | no | Float // Double // Cuda_Float

Arguments

Name | Type | Default
res1 | Tensor | [optional]
res2 | Tensor | [optional]
self | Tensor | [required]
eigenvectors | bool | N
upper | bool | U

Returns : argument 0,1

t

Name | Autograd | defined if
t // t_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
0 | CONSTANT | [required]
1 | CONSTANT | [required]

Returns : Tensor

tan

Name | Autograd | defined if
tan // tan_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

tanh

Name | Autograd | defined if
tanh // tanh_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

topk

Name | Autograd | defined if
topk | yes | Cuda_Float // All CPU Types

Arguments

Name | Type | Default
values | Tensor | [optional]
indices | LongTensor | [optional]
self | Tensor | [required]
k | long | [required]
__last_dim | CONSTANT | [required]
false | CONSTANT | [required]
false | CONSTANT | [required]

Returns : argument 0,1
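
`topk` returns the k extreme values along the last dimension together with their indices (the two optional result tensors above); the trailing CONSTANT arguments appear to correspond to the dimension and the largest/sorted flags of the Python call. A sketch against current PyTorch, where the k largest values are returned in sorted order by default:

```python
import torch

x = torch.tensor([1.0, 7.0, 3.0, 9.0, 5.0])
values, indices = x.topk(2)
print(values)    # tensor([9., 7.])
print(indices)   # tensor([3, 1])
```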

trace

Name | Autograd | defined if
trace | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : accreal

transpose

Name | Autograd | defined if
transpose // transpose_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]
dim0 | long | [required]
dim1 | long | [required]

Returns : Tensor
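
`transpose` swaps two dimensions and returns a view sharing storage with `self`; the `t` entry above is the 2-D special case with the constants 0 and 1 baked in. A sketch:

```python
import torch

x = torch.randn(2, 3, 4)
y = x.transpose(0, 2)          # shape (4, 3, 2), same underlying storage as x
m = torch.randn(2, 3)
assert m.t().shape == (3, 2)   # t() is transpose(0, 1) for matrices
```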

tril

Name | Autograd | defined if
tril // tril_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]
k | long | 0

Returns : argument 0

triu

Name | Autograd | defined if
triu // triu_ | yes | All Types (CPU and CUDA)

Arguments

Name | Type | Default
destination | Tensor | [optional]
self | Tensor | [required]
k | long | 0

Returns : argument 0

trtrs

Name | Autograd | defined if
trtrs | no | Float // Double

Arguments

Name | Type | Default
res1 | Tensor | [optional]
res2 | Tensor | [optional]
self | Tensor | [required]
A | Tensor | [required]
upper | bool | U
transpose | bool | N
unitriangular | bool | N

Returns : argument 0,1

trunc

Name | Autograd | defined if
trunc // trunc_ | yes | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]

Returns : argument 0

unfold

Name | Autograd | defined if
unfold | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
self | Tensor | [required]
dimension | long | [required]
size | long | [required]
step | long | [required]

Returns : argument 0
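
`unfold` extracts every sliding window of length `size` taken with stride `step` along `dimension`, adding the window as a new trailing dimension. A sketch:

```python
import torch

x = torch.arange(6)      # tensor([0, 1, 2, 3, 4, 5])
print(x.unfold(0, 3, 1))
# tensor([[0, 1, 2],
#         [1, 2, 3],
#         [2, 3, 4],
#         [3, 4, 5]])
```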

uniform_

Name | Autograd | defined if
uniform_ | no | Cuda_Float // Cuda_Double // Cuda_Half

Arguments

Name | Type | Default
self | Tensor | [required]
from | double | 0
to | double | 1

Returns : self
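
`uniform_`, together with `normal_`, `bernoulli_`, `cauchy_`, `exponential_`, `geometric_` and `log_normal_`, fills `self` in place with samples from the named distribution and returns `self`. The table lists these only for the CUDA types; in current PyTorch the CPU tensors expose the same methods. A sketch:

```python
import torch

w = torch.empty(3, 3)
w.uniform_(-0.1, 0.1)                    # in-place fill from U(-0.1, 0.1), returns self
m = torch.empty(2, 2).bernoulli_(0.25)   # each entry is 1 with probability 0.25
```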

var

Name | Autograd | defined if
var | no | Float // Double // Cuda_Float // Cuda_Half // Cuda_Double

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : nothing

zero_

Name | Autograd | defined if
zero_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
self | Tensor | [required]

Returns : self

zeros

Name | Autograd | defined if
zeros // zeros_ | no | All Types (CPU and CUDA)

Arguments

Name | Type | Default
result | Tensor | [optional]
size | THSize* | [required]

Returns : argument 0