Python style fixes

PEP 8 cleanup of the codegen scripts: normalize spacing around operators,
commas, and inline comments; drop unused imports (os, yaml); replace an
exact type(...) == str comparison with isinstance(...); and remove an
unused new_options local.
diff --git a/extract_cwrap.py b/extract_cwrap.py
index 0fab951..64c2281 100644
--- a/extract_cwrap.py
+++ b/extract_cwrap.py
@@ -6,16 +6,16 @@
options, _ = parser.parse_args()
files = [
- #'../../csrc/cudnn/cuDNN.cwrap',
+ # '../../csrc/cudnn/cuDNN.cwrap',
'../../csrc/generic/TensorMethods.cwrap',
- #'../../csrc/generic/methods/SparseTensor.cwrap',
+ # '../../csrc/generic/methods/SparseTensor.cwrap',
'../../csrc/generic/methods/Tensor.cwrap',
'../../csrc/generic/methods/TensorApply.cwrap',
'../../csrc/generic/methods/TensorCompare.cwrap',
'../../csrc/generic/methods/TensorCuda.cwrap',
'../../csrc/generic/methods/TensorMath.cwrap',
'../../csrc/generic/methods/TensorRandom.cwrap',
- #'../../csrc/generic/methods/TensorSerialization.cwrap',
+ # '../../csrc/generic/methods/TensorSerialization.cwrap',
]
declaration_lines = []
@@ -35,4 +35,4 @@
declaration_lines.append(line)
with open(options.output, 'w') as output:
- output.write('\n'.join(declaration_lines)+'\n')
+ output.write('\n'.join(declaration_lines) + '\n')
diff --git a/function_wrapper.py b/function_wrapper.py
index 960b77c..335bbb7 100644
--- a/function_wrapper.py
+++ b/function_wrapper.py
@@ -1,5 +1,4 @@
import re
-import yaml
from code_template import CodeTemplate
# temporary things we cannot handle
@@ -58,6 +57,7 @@
}
""")
+
class NYIError(Exception):
"""Indicates we don't support this declaration yet"""
@@ -302,18 +302,18 @@
return ret['type'] == 'long' or (backend_type_env['ScalarName'] == 'Long' and
ret['type'] == 'real' or ret['type'] == 'accreal')
- def handle_zero_dim(env,option):
+ def handle_zero_dim(env, option):
if 'zero_dim_dispatch_when_scalar' not in option:
return []
check_name = option['zero_dim_dispatch_when_scalar']
- zero_dim_actuals = [ arg['name']
- if arg['name'] != check_name else "Scalar({})".format(arg['name'])
- for arg in option['formals_list'] ]
- return [ ZERO_DIM_CHECK.substitute(env,check_name = check_name, zero_dim_actuals=zero_dim_actuals) ]
+ zero_dim_actuals = [arg['name']
+ if arg['name'] != check_name else "Scalar({})".format(arg['name'])
+ for arg in option['formals_list']]
+ return [ZERO_DIM_CHECK.substitute(env, check_name=check_name, zero_dim_actuals=zero_dim_actuals)]
def emit_body(env, option):
body = []
- body += handle_zero_dim(env,option)
+ body += handle_zero_dim(env, option)
# arguments are potentially duplicated because of one argument
# referencing another
seen_names = set()
@@ -354,7 +354,7 @@
# resize tensors for special ops that require it
if 'resize' in arg:
resize = arg['resize']
- if type(resize) == str:
+ if isinstance(resize, str):
body.append("{}.resize_({}.sizes());".format(
arg['name'], resize))
else:
@@ -395,7 +395,7 @@
if ret['kind'] == 'arguments':
if 'aten_custom_call' in option:
- scalar_check = None # all aten_custom_call bodies handle settings on their own.
+ scalar_check = None # all aten_custom_call bodies handle settings on their own.
body.append(CodeTemplate(option['aten_custom_call']).substitute(env))
else:
body.append(call + ";")
@@ -407,7 +407,7 @@
body.append("bool maybe_scalar = {};".format(scalar_check))
scalar_check = 'maybe_scalar'
for arg in arguments:
- body.append("{}_->maybeScalar({});".format(arg['name'],scalar_check))
+ body.append("{}_->maybeScalar({});".format(arg['name'], scalar_check))
if len(arguments_indices) == 1:
arg = arguments[0]
body.append("return {};".format(arg['name']))
@@ -422,8 +422,8 @@
maybe_scalar = "->maybeScalar({})".format(scalar_check) \
if scalar_check is not None \
else ""
- body.append(CodeTemplate(
- "return Tensor((new ${Tensor}(context,${arg_name}))${maybe_scalar},false);").substitute(env, arg_name=call,maybe_scalar=maybe_scalar))
+ return_tensor = "return Tensor((new ${Tensor}(context,${arg_name}))${maybe_scalar},false);"
+ body.append(CodeTemplate(return_tensor).substitute(env, arg_name=call, maybe_scalar=maybe_scalar))
else:
            # we use int64_t for long in the API, so correct it here...
if is_actual_return_long(ret):
diff --git a/gen.py b/gen.py
index 92f4384..9d4fa85 100644
--- a/gen.py
+++ b/gen.py
@@ -1,6 +1,4 @@
-import os
import sys
-import yaml
from optparse import OptionParser
import cwrap_parser
@@ -64,7 +62,7 @@
if not options.no_cuda:
backends.append('CUDA')
-densities = ['Dense','Sparse']
+densities = ['Dense', 'Sparse']
scalar_types = [
('Byte', 'uint8_t', 'Long', 'unsigned char'),
@@ -113,7 +111,7 @@
env['Storage'] = "{}{}Storage".format(backend, scalar_name)
env['Type'] = "{}{}{}Type".format(density_tag, backend, scalar_name)
env['Tensor'] = "{}{}{}Tensor".format(density_tag, backend, scalar_name)
- env['Backend'] = density_tag+backend
+ env['Backend'] = density_tag + backend
# used for generating switch logic for external functions
tag = density_tag + backend + scalar_name
@@ -149,7 +147,7 @@
env['THType'] = scalar_name
env['THStorage'] = "TH{}Storage".format(scalar_name)
- env['THTensor'] = 'TH{}{}Tensor'.format(th_density_tag,scalar_name)
+ env['THTensor'] = 'TH{}{}Tensor'.format(th_density_tag, scalar_name)
env['THIndexTensor'] = 'THLongTensor'
env['state'] = []
env['isCUDA'] = 'false'
@@ -193,7 +191,7 @@
write(env['Tensor'] + ".h", TENSOR_DERIVED_H.substitute(env))
type_register = (('context->type_registry[static_cast<int>(Backend::{})]' +
- '[static_cast<int>(ScalarType::{})].reset(new {}(context));')
+ '[static_cast<int>(ScalarType::{})].reset(new {}(context));')
.format(env['Backend'], scalar_name, env['Type']))
top_env['type_registrations'].append(type_register)
top_env['type_headers'].append(
diff --git a/nn_parse.py b/nn_parse.py
index 9daa058..f92b72d 100644
--- a/nn_parse.py
+++ b/nn_parse.py
@@ -1,4 +1,3 @@
-import yaml
import re
import common_with_cwrap
from collections import OrderedDict
diff --git a/preprocess_declarations.py b/preprocess_declarations.py
index 5814bf6..2d77302 100644
--- a/preprocess_declarations.py
+++ b/preprocess_declarations.py
@@ -2,7 +2,6 @@
from copy import deepcopy
from function_wrapper import TYPE_FORMAL_GENERIC
import common_with_cwrap
-import yaml
type_map = {
'floating_point': [
@@ -23,7 +22,8 @@
type_map['all'] = all_types
all_backends = ['CPU', 'CUDA', 'SparseCPU', 'SparseCUDA']
-default_backends = ['CPU','CUDA']
+default_backends = ['CPU', 'CUDA']
+
def process_types_and_backends(option):
# if specific pairs were not listed, then enumerate them
@@ -117,23 +117,25 @@
# where 'name' is the name of the argument that should be a scalar
# during dispatch, if that argument is marked internally as holding a scalar
# then the method will dispatch to that function.
+
+
def discover_zero_dim_tensor_operations(declaration):
def exclude(arg):
return arg.get('ignore_check')
- def signature(option,i=None,value=None):
- elements = [TYPE_FORMAL_GENERIC.get(arg['type'],arg['type'])
+ def signature(option, i=None, value=None):
+ elements = [TYPE_FORMAL_GENERIC.get(arg['type'], arg['type'])
if i is None or j != i else value
for j, arg in enumerate(option['arguments'])
- if not exclude(arg) ]
+ if not exclude(arg)]
return '#'.join(elements)
signature_to_option = {signature(option): option
for option in declaration['options']}
for option in declaration['options']:
- for i,arg in enumerate(option['arguments']):
+ for i, arg in enumerate(option['arguments']):
if arg['type'] == 'real':
- signature_of_tensor_version = signature(option,i,'Tensor &')
+ signature_of_tensor_version = signature(option, i, 'Tensor &')
if signature_of_tensor_version in signature_to_option:
tensor_version = \
signature_to_option[signature_of_tensor_version]
@@ -160,7 +162,6 @@
common_with_cwrap.sort_by_number_of_options(declaration)
discover_zero_dim_tensor_operations(declaration)
- new_options = []
for option in declaration['options']:
set_mode(option)
sanitize_return(option)
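---
Note (outside the patch): a minimal sketch of why the function_wrapper.py
hunk swaps `type(resize) == str` for `isinstance(resize, str)`. An exact
type comparison rejects subclasses of str, while isinstance accepts them;
the `Pattern` subclass below is hypothetical, standing in for any str-like
value a cwrap declaration might carry.

    class Pattern(str):
        """Hypothetical str subclass tagging a resize expression."""

    resize = Pattern("self")

    print(type(resize) == str)      # False: exact type check rejects the subclass
    print(isinstance(resize, str))  # True: matches str and any subclass

isinstance() is also the form pycodestyle/flake8 flag for (E721), which fits
the rest of this cleanup.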