Revise tests so weights and biases are no longer inputs
This CL turns them into constant operands set at model building time.
Also fixed problems in the test generator when handling FP literals in
model parameters.
Original tests are renamed with _weights_as_inputs suffix.
Bug: 67015862
Bug: 63905942
Test: NeuralNetworksTests on Angler
Change-Id: Ic59c78e7e6fcb143fb05a0360baa281b7528050c
diff --git a/runtime/test/specs/conv_float.mod.py b/runtime/test/specs/conv_float.mod.py
index fb6ebff..04e5d76 100644
--- a/runtime/test/specs/conv_float.mod.py
+++ b/runtime/test/specs/conv_float.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
-f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
-b1 = Input("op3", "TENSOR_FLOAT32", "{1}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -29,11 +29,7 @@
# Example 1. Input in operand 0,
input0 = {i1: # input 0
- [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0],
- f1:
- [.25, .25, .25, .25],
- b1:
- [0]}
+ [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0]}
output0 = {output: # output 0
[.875, .875, .875, .875]}
diff --git a/runtime/test/specs/conv_float_channels.mod.py b/runtime/test/specs/conv_float_channels.mod.py
index cc6bfcb..089a5fc 100644
--- a/runtime/test/specs/conv_float_channels.mod.py
+++ b/runtime/test/specs/conv_float_channels.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
-f1 = Input("op2", "TENSOR_FLOAT32", "{3, 1, 1, 3}")
-b1 = Input("op3", "TENSOR_FLOAT32", "{3}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{3, 1, 1, 3}", [1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{3}", [0., 0., 0.])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -29,13 +29,7 @@
# Example 1. Input in operand 0,
input0 = {i1: # input 0
- [99.0, 99.0, 99.0],
- f1:
- [1.0, 1.0, 1.0,
- 2.0, 2.0, 2.0,
- 3.0, 3.0, 3.0],
- b1:
- [0., 0., 0.]}
+ [99.0, 99.0, 99.0]}
output0 = {output: # output 0
[297., 594., 891.]}
diff --git a/runtime/test/specs/conv_float_channels_weights_as_inputs.mod.py b/runtime/test/specs/conv_float_channels_weights_as_inputs.mod.py
new file mode 100644
index 0000000..cc6bfcb
--- /dev/null
+++ b/runtime/test/specs/conv_float_channels_weights_as_inputs.mod.py
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+f1 = Input("op2", "TENSOR_FLOAT32", "{3, 1, 1, 3}")
+b1 = Input("op3", "TENSOR_FLOAT32", "{3}")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+# output dimension:
+# (i1.height - f1.height + 1) x (i1.width - f1.width + 1)
+output = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [99.0, 99.0, 99.0],
+ f1:
+ [1.0, 1.0, 1.0,
+ 2.0, 2.0, 2.0,
+ 3.0, 3.0, 3.0],
+ b1:
+ [0., 0., 0.]}
+
+output0 = {output: # output 0
+ [297., 594., 891.]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/conv_float_large.mod.py b/runtime/test/specs/conv_float_large.mod.py
index aeed75a..32c6832 100644
--- a/runtime/test/specs/conv_float_large.mod.py
+++ b/runtime/test/specs/conv_float_large.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 3, 3}")
-f1 = Input("op2", "TENSOR_FLOAT32", "{3, 1, 1, 3}")
-b1 = Input("op3", "TENSOR_FLOAT32", "{3}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{3, 1, 1, 3}", [1., 4., 7., 2., 5., 8., 3., 6., 9.])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{3}", [0., 0., 0.])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -30,13 +30,7 @@
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[ 1., 2., 3., 4., 5., 6., 7., 8., 9.,
- 10., 11., 12., 13., 14., 15., 16., 17., 18.],
- f1:
- [ 1., 4., 7.,
- 2., 5., 8.,
- 3., 6., 9.],
- b1:
- [0., 0., 0.]}
+ 10., 11., 12., 13., 14., 15., 16., 17., 18.]}
output0 = {output: # output 0
[ 30., 36., 42.,
diff --git a/runtime/test/specs/conv_float_large_weights_as_inputs.mod.py b/runtime/test/specs/conv_float_large_weights_as_inputs.mod.py
new file mode 100644
index 0000000..aeed75a
--- /dev/null
+++ b/runtime/test/specs/conv_float_large_weights_as_inputs.mod.py
@@ -0,0 +1,51 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 3, 3}")
+f1 = Input("op2", "TENSOR_FLOAT32", "{3, 1, 1, 3}")
+b1 = Input("op3", "TENSOR_FLOAT32", "{3}")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+# output dimension:
+# (i1.height - f1.height + 1) x (i1.width - f1.width + 1)
+output = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18.],
+ f1:
+ [ 1., 4., 7.,
+ 2., 5., 8.,
+ 3., 6., 9.],
+ b1:
+ [0., 0., 0.]}
+
+output0 = {output: # output 0
+ [ 30., 36., 42.,
+ 66., 81., 96.,
+ 102., 126., 150.,
+ 138., 171., 204.,
+ 174., 216., 258.,
+ 210., 261., 312.]
+ }
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/conv_float_weights_as_inputs.mod.py b/runtime/test/specs/conv_float_weights_as_inputs.mod.py
new file mode 100644
index 0000000..fb6ebff
--- /dev/null
+++ b/runtime/test/specs/conv_float_weights_as_inputs.mod.py
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+b1 = Input("op3", "TENSOR_FLOAT32", "{1}")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+# output dimension:
+# (i1.height - f1.height + 1) x (i1.width - f1.width + 1)
+output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0],
+ f1:
+ [.25, .25, .25, .25],
+ b1:
+ [0]}
+
+output0 = {output: # output 0
+ [.875, .875, .875, .875]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/conv_quant8.mod.py b/runtime/test/specs/conv_quant8.mod.py
index 1b6a709..b56e0b0 100644
--- a/runtime/test/specs/conv_quant8.mod.py
+++ b/runtime/test/specs/conv_quant8.mod.py
@@ -12,12 +12,14 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
+
+# conv_quant8.mod.py with biases and filter being constants
model = Model()
i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 3, 3, 1}, 0.5f, 0")
-f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}, 0.5f, 0")
-b1 = Input("op3", "TENSOR_INT32", "{1}, 0.25f, 0")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}, 0.5f, 0",
+ [2, 2, 2, 2])
+b1 = Parameter("op3", "TENSOR_INT32", "{1}, 0.25f, 0", [4])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -25,18 +27,19 @@
# (i1.height - f1.height + 1) x (i1.width - f1.width + 1)
output = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}, 1.f, 0")
-model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride,
+ stride, act).To(output)
# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [8, 8, 8, 8, 4, 8, 8, 8, 8],
- f1:
- [2, 2, 2, 2],
- b1:
- [4]}
+input0 = {
+ i1: # input 0
+ [8, 8, 8, 8, 4, 8, 8, 8, 8]
+}
# (i1 (conv) f1) + b1
-output0 = {output: # output 0
- [15, 15, 15, 15]}
+output0 = {
+ output: # output 0
+ [15, 15, 15, 15]
+}
# Instantiate an example
Example((input0, output0))
diff --git a/runtime/test/specs/conv_quant8_channels.mod.py b/runtime/test/specs/conv_quant8_channels.mod.py
index 90f62ab..f8cab0a 100644
--- a/runtime/test/specs/conv_quant8_channels.mod.py
+++ b/runtime/test/specs/conv_quant8_channels.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 3}, 0.5f, 0")
-f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{3, 1, 1, 3}, 0.5f, 0")
-b1 = Input("op3", "TENSOR_INT32", "{3}, 0.25, 0")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{3, 1, 1, 3}, 0.5f, 0", [1, 2, 3, 4, 5, 6, 7, 8, 9])
+b1 = Parameter("op3", "TENSOR_INT32", "{3}, 0.25, 0", [0, 0, 0])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -27,13 +27,7 @@
# Example 1. Input in operand 0,
input0 = {i1: # input 0
- [10, 10, 10],
- f1:
- [1, 2, 3,
- 4, 5, 6,
- 7, 8, 9],
- b1:
- [0, 0, 0]}
+ [10, 10, 10]}
output0 = {output: # output 0
[15, 38, 60]}
diff --git a/runtime/test/specs/conv_quant8_channels_weights_as_inputs.mod.py b/runtime/test/specs/conv_quant8_channels_weights_as_inputs.mod.py
new file mode 100644
index 0000000..90f62ab
--- /dev/null
+++ b/runtime/test/specs/conv_quant8_channels_weights_as_inputs.mod.py
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 3}, 0.5f, 0")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{3, 1, 1, 3}, 0.5f, 0")
+b1 = Input("op3", "TENSOR_INT32", "{3}, 0.25, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 3}, 1.0, 0")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [10, 10, 10],
+ f1:
+ [1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9],
+ b1:
+ [0, 0, 0]}
+
+output0 = {output: # output 0
+ [15, 38, 60]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/conv_quant8_large.mod.py b/runtime/test/specs/conv_quant8_large.mod.py
index 4ccfa00..5272d72 100644
--- a/runtime/test/specs/conv_quant8_large.mod.py
+++ b/runtime/test/specs/conv_quant8_large.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 3, 3}, 0.5, 0")
-f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{3, 1, 1, 3}, 0.5, 0")
-b1 = Input("op3", "TENSOR_INT32", "{3}, 0.25, 0")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{3, 1, 1, 3}, 0.5, 0", [1, 4, 7, 2, 5, 8, 3, 6, 9])
+b1 = Parameter("op3", "TENSOR_INT32", "{3}, 0.25, 0", [0, 0, 0])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -28,13 +28,7 @@
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[ 1, 2, 3, 4, 5, 6, 7, 8, 9,
- 10, 11, 12, 13, 14, 15, 16, 17, 18],
- f1:
- [ 1, 4, 7,
- 2, 5, 8,
- 3, 6, 9],
- b1:
- [0, 0, 0]}
+ 10, 11, 12, 13, 14, 15, 16, 17, 18]}
output0 = {output: # output 0
[ 8, 9, 11,
diff --git a/runtime/test/specs/conv_quant8_large_weights_as_inputs.mod.py b/runtime/test/specs/conv_quant8_large_weights_as_inputs.mod.py
new file mode 100644
index 0000000..4ccfa00
--- /dev/null
+++ b/runtime/test/specs/conv_quant8_large_weights_as_inputs.mod.py
@@ -0,0 +1,49 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 3, 3}, 0.5, 0")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{3, 1, 1, 3}, 0.5, 0")
+b1 = Input("op3", "TENSOR_INT32", "{3}, 0.25, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 2, 3, 3}, 1.0, 0")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [ 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18],
+ f1:
+ [ 1, 4, 7,
+ 2, 5, 8,
+ 3, 6, 9],
+ b1:
+ [0, 0, 0]}
+
+output0 = {output: # output 0
+ [ 8, 9, 11,
+ 17, 21, 24,
+ 26, 32, 38,
+ 35, 43, 51,
+ 44, 54, 65,
+ 53, 66, 78]
+ }
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/conv_quant8_overflow.mod.py b/runtime/test/specs/conv_quant8_overflow.mod.py
index 45e977a..4d4c6e4 100644
--- a/runtime/test/specs/conv_quant8_overflow.mod.py
+++ b/runtime/test/specs/conv_quant8_overflow.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 3, 3}, 0.5, 0")
-f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{3, 1, 1, 3}, 0.5, 0")
-b1 = Input("op3", "TENSOR_INT32", "{3}, 0.25, 0")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{3, 1, 1, 3}, 0.5, 0", [10, 40, 70, 20, 50, 80, 30, 60, 90])
+b1 = Parameter("op3", "TENSOR_INT32", "{3}, 0.25, 0", [0, 0, 0])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -28,13 +28,7 @@
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[ 1, 2, 3, 4, 5, 6, 7, 8, 9,
- 10, 11, 12, 13, 14, 15, 16, 17, 18],
- f1:
- [ 10, 40, 70,
- 20, 50, 80,
- 30, 60, 90],
- b1:
- [0, 0, 0]}
+ 10, 11, 12, 13, 14, 15, 16, 17, 18]}
output0 = {output: # output 0
[ 75, 90, 105,
diff --git a/runtime/test/specs/conv_quant8_overflow_weights_as_inputs.mod.py b/runtime/test/specs/conv_quant8_overflow_weights_as_inputs.mod.py
new file mode 100644
index 0000000..45e977a
--- /dev/null
+++ b/runtime/test/specs/conv_quant8_overflow_weights_as_inputs.mod.py
@@ -0,0 +1,49 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 3, 3}, 0.5, 0")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{3, 1, 1, 3}, 0.5, 0")
+b1 = Input("op3", "TENSOR_INT32", "{3}, 0.25, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 2, 3, 3}, 1.0, 0")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [ 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18],
+ f1:
+ [ 10, 40, 70,
+ 20, 50, 80,
+ 30, 60, 90],
+ b1:
+ [0, 0, 0]}
+
+output0 = {output: # output 0
+ [ 75, 90, 105,
+ 165, 203, 240,
+ 255, 255, 255,
+ 255, 255, 255,
+ 255, 255, 255,
+ 255, 255, 255]
+ }
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/conv_quant8_weights_as_inputs.mod.py b/runtime/test/specs/conv_quant8_weights_as_inputs.mod.py
new file mode 100644
index 0000000..1b6a709
--- /dev/null
+++ b/runtime/test/specs/conv_quant8_weights_as_inputs.mod.py
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 3, 3, 1}, 0.5f, 0")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}, 0.5f, 0")
+b1 = Input("op3", "TENSOR_INT32", "{1}, 0.25f, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+# output dimension:
+# (i1.height - f1.height + 1) x (i1.width - f1.width + 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}, 1.f, 0")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [8, 8, 8, 8, 4, 8, 8, 8, 8],
+ f1:
+ [2, 2, 2, 2],
+ b1:
+ [4]}
+# (i1 (conv) f1) + b1
+output0 = {output: # output 0
+ [15, 15, 15, 15]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/depthwise_conv2d_float.mod.py b/runtime/test/specs/depthwise_conv2d_float.mod.py
index 9a8af4c..d8f61f3 100644
--- a/runtime/test/specs/depthwise_conv2d_float.mod.py
+++ b/runtime/test/specs/depthwise_conv2d_float.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
-f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
-b1 = Input("op3", "TENSOR_FLOAT32", "{4}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0, .2, 0, .25, 0, 0, .3, .25, 0, 0, 0, .25, .1, 0, 0])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [1, 2, 3, 4])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -34,14 +34,7 @@
input0 = {i1: # input 0
[10, 21, 10, 22, 10, 23,
10, 24, 10, 25, 10, 26,
- 10, 27, 10, 28, 10, 29],
- f1:
- [.25, 0, .2, 0,
- .25, 0, 0, .3,
- .25, 0, 0, 0,
- .25, .1, 0, 0],
- b1:
- [1, 2, 3, 4]}
+ 10, 27, 10, 28, 10, 29]}
# (i1 (conv) f1) + b1
# filter usage:
# in_ch1 * f_1 --> output_d1
diff --git a/runtime/test/specs/depthwise_conv2d_float_large.mod.py b/runtime/test/specs/depthwise_conv2d_float_large.mod.py
index ee7ae94..38d1865 100644
--- a/runtime/test/specs/depthwise_conv2d_float_large.mod.py
+++ b/runtime/test/specs/depthwise_conv2d_float_large.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 3}") # depth_in = 3
-f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 2}") # depth_out = 2
-b1 = Input("op3", "TENSOR_FLOAT32", "{2}") # depth_out = 2
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 2}", [.25, 0, .25, 1, .25, 0, .25, 1]) # depth_out = 2
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [100, 200]) # depth_out = 2
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -34,12 +34,7 @@
input0 = {
i1: [ # input 0
10, 21, 100, 10, 22, 200,
- 10, 23, 300, 10, 24, 400],
- f1: [
- .25, 0, .25, 1,
- .25, 0, .25, 1],
- b1:
- [100, 200]
+ 10, 23, 300, 10, 24, 400]
}
# (i1 (conv) f1) + b1
output0 = {output: # output 0
diff --git a/runtime/test/specs/depthwise_conv2d_float_large_2.mod.py b/runtime/test/specs/depthwise_conv2d_float_large_2.mod.py
index b91f309..a5ad8f6 100644
--- a/runtime/test/specs/depthwise_conv2d_float_large_2.mod.py
+++ b/runtime/test/specs/depthwise_conv2d_float_large_2.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 3}") # depth_in = 3
-f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}") # depth_out = 4
-b1 = Input("op3", "TENSOR_FLOAT32", "{4}") # depth_out = 4
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0, 10, 100, .25, 1, 20, 100, .25, 0, 30, 100, .25, 1, 40, 100]) # depth_out = 4
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [600000, 700000, 800000, 900000]) # depth_out = 4
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -36,14 +36,7 @@
10, 21, 100,
10, 22, 200,
10, 23, 300,
- 10, 24, 400],
- f1: [
- .25, 0, 10, 100,
- .25, 1, 20, 100,
- .25, 0, 30, 100,
- .25, 1, 40, 100],
- b1:
- [600000, 700000, 800000, 900000]
+ 10, 24, 400]
}
# (i1 (conv) f1) + b1
output0 = {output: # output 0
diff --git a/runtime/test/specs/depthwise_conv2d_float_large_2_weights_as_inputs.mod.py b/runtime/test/specs/depthwise_conv2d_float_large_2_weights_as_inputs.mod.py
new file mode 100644
index 0000000..b91f309
--- /dev/null
+++ b/runtime/test/specs/depthwise_conv2d_float_large_2_weights_as_inputs.mod.py
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 3}") # depth_in = 3
+f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}") # depth_out = 4
+b1 = Input("op3", "TENSOR_FLOAT32", "{4}") # depth_out = 4
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+cm = Int32Scalar("channelMultiplier", 1)
+output = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 4}")
+
+model = model.Operation("DEPTHWISE_CONV_2D",
+ i1, f1, b1,
+ pad0, pad0, pad0, pad0,
+ stride, stride,
+ cm, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: [ # input 0
+ 10, 21, 100,
+ 10, 22, 200,
+ 10, 23, 300,
+ 10, 24, 400],
+ f1: [
+ .25, 0, 10, 100,
+ .25, 1, 20, 100,
+ .25, 0, 30, 100,
+ .25, 1, 40, 100],
+ b1:
+ [600000, 700000, 800000, 900000]
+ }
+# (i1 (conv) f1) + b1
+output0 = {output: # output 0
+ [600010, 700046, 830000, 900000]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/depthwise_conv2d_float_large_weights_as_inputs.mod.py b/runtime/test/specs/depthwise_conv2d_float_large_weights_as_inputs.mod.py
new file mode 100644
index 0000000..ee7ae94
--- /dev/null
+++ b/runtime/test/specs/depthwise_conv2d_float_large_weights_as_inputs.mod.py
@@ -0,0 +1,49 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 3}") # depth_in = 3
+f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 2}") # depth_out = 2
+b1 = Input("op3", "TENSOR_FLOAT32", "{2}") # depth_out = 2
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+cm = Int32Scalar("channelMultiplier", 1)
+output = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+
+model = model.Operation("DEPTHWISE_CONV_2D",
+ i1, f1, b1,
+ pad0, pad0, pad0, pad0,
+ stride, stride,
+ cm, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: [ # input 0
+ 10, 21, 100, 10, 22, 200,
+ 10, 23, 300, 10, 24, 400],
+ f1: [
+ .25, 0, .25, 1,
+ .25, 0, .25, 1],
+ b1:
+ [100, 200]
+ }
+# (i1 (conv) f1) + b1
+output0 = {output: # output 0
+ [110, 246]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/depthwise_conv2d_float_weights_as_inputs.mod.py b/runtime/test/specs/depthwise_conv2d_float_weights_as_inputs.mod.py
new file mode 100644
index 0000000..9a8af4c
--- /dev/null
+++ b/runtime/test/specs/depthwise_conv2d_float_weights_as_inputs.mod.py
@@ -0,0 +1,58 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
+f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+b1 = Input("op3", "TENSOR_FLOAT32", "{4}")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+cm = Int32Scalar("channelMultiplier", 2)
+output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+
+model = model.Operation("DEPTHWISE_CONV_2D",
+ i1, f1, b1,
+ pad0, pad0, pad0, pad0,
+ stride, stride,
+ cm, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [10, 21, 10, 22, 10, 23,
+ 10, 24, 10, 25, 10, 26,
+ 10, 27, 10, 28, 10, 29],
+ f1:
+ [.25, 0, .2, 0,
+ .25, 0, 0, .3,
+ .25, 0, 0, 0,
+ .25, .1, 0, 0],
+ b1:
+ [1, 2, 3, 4]}
+# (i1 (conv) f1) + b1
+# filter usage:
+# in_ch1 * f_1 --> output_d1
+# in_ch1 * f_2 --> output_d2
+# in_ch2 * f_3 --> output_d3
+# in_ch3 * f_4 --> output_d4
+output0 = {output: # output 0
+ [11, 3, 7.2, 10.6,
+ 11, 3, 7.4, 10.9,
+ 11, 3, 7.8, 11.5,
+ 11, 3, 8.0, 11.8]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/depthwise_conv2d_quant8.mod.py b/runtime/test/specs/depthwise_conv2d_quant8.mod.py
index f0478f8..5f43c63 100644
--- a/runtime/test/specs/depthwise_conv2d_quant8.mod.py
+++ b/runtime/test/specs/depthwise_conv2d_quant8.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
-f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
-b1 = Input("op3", "TENSOR_INT32", "{2}, 0.25f, 0")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0", [2, 4, 2, 0, 2, 2, 2, 0])
+b1 = Parameter("op3", "TENSOR_INT32", "{2}, 0.25f, 0", [0, 0])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -32,11 +32,7 @@
# Example 1. Input in operand 0,
input0 = {i1: # input 0
- [4, 16, 4, 32, 4, 64, 4, 128],
- f1:
- [2, 4, 2, 0, 2, 2, 2, 0],
- b1:
- [0, 0]}
+ [4, 16, 4, 32, 4, 64, 4, 128]}
# (i1 (depthconv) f1)
output0 = {output: # output 0
[8, 48]}
diff --git a/runtime/test/specs/depthwise_conv2d_quant8_large.mod.py b/runtime/test/specs/depthwise_conv2d_quant8_large.mod.py
index 506aa58..785e0a7 100644
--- a/runtime/test/specs/depthwise_conv2d_quant8_large.mod.py
+++ b/runtime/test/specs/depthwise_conv2d_quant8_large.mod.py
@@ -16,8 +16,8 @@
model = Model()
i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
-f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
-b1 = Input("op3", "TENSOR_INT32", "{2}, 0.25f, 0")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0", [2, 4, 2, 0, 2, 2, 2, 0])
+b1 = Parameter("op3", "TENSOR_INT32", "{2}, 0.25f, 0", [0, 0])
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
@@ -32,11 +32,7 @@
# Example 1. Input in operand 0,
input0 = {i1: # input 0
- [4, 16, 4, 32, 4, 64, 4, 128],
- f1:
- [2, 4, 2, 0, 2, 2, 2, 0],
- b1:
- [0, 0]}
+ [4, 16, 4, 32, 4, 64, 4, 128]}
# (i1 (depthconv) f1)
output0 = {output: # output 0
[8, 48]}
diff --git a/runtime/test/specs/depthwise_conv2d_quant8_large_weights_as_inputs.mod.py b/runtime/test/specs/depthwise_conv2d_quant8_large_weights_as_inputs.mod.py
new file mode 100644
index 0000000..506aa58
--- /dev/null
+++ b/runtime/test/specs/depthwise_conv2d_quant8_large_weights_as_inputs.mod.py
@@ -0,0 +1,45 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
+b1 = Input("op3", "TENSOR_INT32", "{2}, 0.25f, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+cm = Int32Scalar("channelMultiplier", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 2}, 1.f, 0")
+
+model = model.Operation("DEPTHWISE_CONV_2D",
+ i1, f1, b1,
+ pad0, pad0, pad0, pad0,
+ stride, stride,
+ cm, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [4, 16, 4, 32, 4, 64, 4, 128],
+ f1:
+ [2, 4, 2, 0, 2, 2, 2, 0],
+ b1:
+ [0, 0]}
+# (i1 (depthconv) f1)
+output0 = {output: # output 0
+ [8, 48]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/depthwise_conv2d_quant8_weights_as_inputs.mod.py b/runtime/test/specs/depthwise_conv2d_quant8_weights_as_inputs.mod.py
new file mode 100644
index 0000000..f0478f8
--- /dev/null
+++ b/runtime/test/specs/depthwise_conv2d_quant8_weights_as_inputs.mod.py
@@ -0,0 +1,45 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
+b1 = Input("op3", "TENSOR_INT32", "{2}, 0.25f, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+cm = Int32Scalar("channelMultiplier", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM", "{1,1,1,2}, 1.f, 0")
+
+model = model.Operation("DEPTHWISE_CONV_2D",
+ i1, f1, b1,
+ pad0, pad0, pad0, pad0,
+ stride, stride,
+ cm, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [4, 16, 4, 32, 4, 64, 4, 128],
+ f1:
+ [2, 4, 2, 0, 2, 2, 2, 0],
+ b1:
+ [0, 0]}
+# (i1 (depthconv) f1)
+output0 = {output: # output 0
+ [8, 48]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/fully_connected_float.mod.py b/runtime/test/specs/fully_connected_float.mod.py
index 0c7509d..29a21e7 100644
--- a/runtime/test/specs/fully_connected_float.mod.py
+++ b/runtime/test/specs/fully_connected_float.mod.py
@@ -16,17 +16,15 @@
model = Model()
in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")
-weights = Input("op2", "TENSOR_FLOAT32", "{1, 1}")
-bias = Input("b0", "TENSOR_FLOAT32", "{1}")
+weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 1}", [2])
+bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [4])
out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}")
act = Int32Scalar("act", 0)
model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
# Example 1. Input in operand 0,
input0 = {in0: # input 0
- [2, 32, 16],
- weights: [2],
- bias: [4]}
+ [2, 32, 16]}
output0 = {out0: # output 0
[8, 68, 36]}
diff --git a/runtime/test/specs/fully_connected_float_large.mod.py b/runtime/test/specs/fully_connected_float_large.mod.py
index f88fbca..2bfa98b 100644
--- a/runtime/test/specs/fully_connected_float_large.mod.py
+++ b/runtime/test/specs/fully_connected_float_large.mod.py
@@ -16,19 +16,15 @@
model = Model()
in0 = Input("op1", "TENSOR_FLOAT32", "{1, 5}") # batch = 1, input_size = 5
-weights = Input("op2", "TENSOR_FLOAT32", "{1, 5}") # num_units = 1, input_size = 5
-bias = Input("b0", "TENSOR_FLOAT32", "{1}")
+weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 5}", [2, 3, 4, 5, 6]) # num_units = 1, input_size = 5
+bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [900000])
out0 = Output("op3", "TENSOR_FLOAT32", "{1, 1}") # batch = 1, number_units = 1
act = Int32Scalar("act", 0)
model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
# Example 1. Input in operand 0,
input0 = {in0: # input 0
- [1, 10, 100, 1000, 10000],
- weights:
- [2, 3, 4, 5, 6],
- bias:
- [900000]}
+ [1, 10, 100, 1000, 10000]}
output0 = {out0: # output 0
[965432]}
diff --git a/runtime/test/specs/fully_connected_float_large_weights_as_inputs.mod.py b/runtime/test/specs/fully_connected_float_large_weights_as_inputs.mod.py
new file mode 100644
index 0000000..f88fbca
--- /dev/null
+++ b/runtime/test/specs/fully_connected_float_large_weights_as_inputs.mod.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+in0 = Input("op1", "TENSOR_FLOAT32", "{1, 5}") # batch = 1, input_size = 5
+weights = Input("op2", "TENSOR_FLOAT32", "{1, 5}") # num_units = 1, input_size = 5
+bias = Input("b0", "TENSOR_FLOAT32", "{1}")
+out0 = Output("op3", "TENSOR_FLOAT32", "{1, 1}") # batch = 1, number_units = 1
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [1, 10, 100, 1000, 10000],
+ weights:
+ [2, 3, 4, 5, 6],
+ bias:
+ [900000]}
+output0 = {out0: # output 0
+ [965432]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/fully_connected_float_weights_as_inputs.mod.py b/runtime/test/specs/fully_connected_float_weights_as_inputs.mod.py
new file mode 100644
index 0000000..0c7509d
--- /dev/null
+++ b/runtime/test/specs/fully_connected_float_weights_as_inputs.mod.py
@@ -0,0 +1,34 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")
+weights = Input("op2", "TENSOR_FLOAT32", "{1, 1}")
+bias = Input("b0", "TENSOR_FLOAT32", "{1}")
+out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [2, 32, 16],
+ weights: [2],
+ bias: [4]}
+output0 = {out0: # output 0
+ [8, 68, 36]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/fully_connected_quant8.mod.py b/runtime/test/specs/fully_connected_quant8.mod.py
index 50e0220..3bb941b 100644
--- a/runtime/test/specs/fully_connected_quant8.mod.py
+++ b/runtime/test/specs/fully_connected_quant8.mod.py
@@ -16,17 +16,15 @@
model = Model()
in0 = Input("op1", "TENSOR_QUANT8_ASYMM", "{3, 1}, 0.5f, 0")
-weights = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 1}, 0.5f, 0")
-bias = Input("b0", "TENSOR_INT32", "{1}, 0.25f, 0")
+weights = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{1, 1}, 0.5f, 0", [2])
+bias = Parameter("b0", "TENSOR_INT32", "{1}, 0.25f, 0", [4])
out0 = Output("op3", "TENSOR_QUANT8_ASYMM", "{3, 1}, 1.f, 0")
act = Int32Scalar("act", 0)
model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
# Example 1. Input in operand 0,
input0 = {in0: # input 0
- [2, 32, 16],
- weights: [2],
- bias: [4]}
+ [2, 32, 16]}
output0 = {out0: # output 0
[2, 17, 9]}
diff --git a/runtime/test/specs/fully_connected_quant8_large.mod.py b/runtime/test/specs/fully_connected_quant8_large.mod.py
index 70ea525..f04d150 100644
--- a/runtime/test/specs/fully_connected_quant8_large.mod.py
+++ b/runtime/test/specs/fully_connected_quant8_large.mod.py
@@ -16,19 +16,15 @@
model = Model()
in0 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0") # batch = 1, input_size = 5
-weights = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0") # num_units = 1, input_size = 5
-bias = Input("b0", "TENSOR_INT32", "{1}, 0.04, 0")
+weights = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0", [10, 20, 20, 20, 10]) # num_units = 1, input_size = 5
+bias = Parameter("b0", "TENSOR_INT32", "{1}, 0.04, 0", [10])
out0 = Output("op3", "TENSOR_QUANT8_ASYMM", "{1, 1}, 1.f, 0") # batch = 1, number_units = 1
act = Int32Scalar("act", 0)
model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
# Example 1. Input in operand 0,
input0 = {in0: # input 0
- [10, 10, 10, 10, 10],
- weights:
- [10, 20, 20, 20, 10],
- bias:
- [10]}
+ [10, 10, 10, 10, 10]}
output0 = {out0: # output 0
[32]}
diff --git a/runtime/test/specs/fully_connected_quant8_large_weights_as_inputs.mod.py b/runtime/test/specs/fully_connected_quant8_large_weights_as_inputs.mod.py
new file mode 100644
index 0000000..70ea525
--- /dev/null
+++ b/runtime/test/specs/fully_connected_quant8_large_weights_as_inputs.mod.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+in0 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0") # batch = 1, input_size = 5
+weights = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0") # num_units = 1, input_size = 5
+bias = Input("b0", "TENSOR_INT32", "{1}, 0.04, 0")
+out0 = Output("op3", "TENSOR_QUANT8_ASYMM", "{1, 1}, 1.f, 0") # batch = 1, number_units = 1
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [10, 10, 10, 10, 10],
+ weights:
+ [10, 20, 20, 20, 10],
+ bias:
+ [10]}
+output0 = {out0: # output 0
+ [32]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/fully_connected_quant8_weights_as_inputs.mod.py b/runtime/test/specs/fully_connected_quant8_weights_as_inputs.mod.py
new file mode 100644
index 0000000..50e0220
--- /dev/null
+++ b/runtime/test/specs/fully_connected_quant8_weights_as_inputs.mod.py
@@ -0,0 +1,34 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+in0 = Input("op1", "TENSOR_QUANT8_ASYMM", "{3, 1}, 0.5f, 0")
+weights = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 1}, 0.5f, 0")
+bias = Input("b0", "TENSOR_INT32", "{1}, 0.25f, 0")
+out0 = Output("op3", "TENSOR_QUANT8_ASYMM", "{3, 1}, 1.f, 0")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [2, 32, 16],
+ weights: [2],
+ bias: [4]}
+output0 = {out0: # output 0
+ [2, 17, 9]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/lsh_projection.mod.py b/runtime/test/specs/lsh_projection.mod.py
index 5a3eff1..cc46b83 100644
--- a/runtime/test/specs/lsh_projection.mod.py
+++ b/runtime/test/specs/lsh_projection.mod.py
@@ -20,31 +20,20 @@
model = Model()
-hhash = Input("hash", "TENSOR_FLOAT32", "{%d, %d}" % (num_hash, num_bits))
+hhash = Parameter("hash", "TENSOR_FLOAT32", "{%d, %d}" % (num_hash, num_bits),
+ [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765])
lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
weight = Input("weight", "TENSOR_FLOAT32", "{%d}" % (num_input))
-type_param = Input("type_param", "TENSOR_INT32", "{1}")
+type_param = Int32Scalar("type_param", 2) # DENSE
output = Output("output", "TENSOR_INT32", "{%d, %d}" % (num_hash, num_bits))
-model = model.Operation("LSH_PROJECTION", hhash, lookup, weight, type_param).To(output)
+model = model.Operation("LSH_PROJECTION", hhash, lookup, weight,
+ type_param).To(output)
-input0 = {lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
- hhash: [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765],
- weight: [0.12, 0.34, 0.56],
- type_param: [2], # DENSE
- }
-
+# TODO: weight should be a constant, too.
+input0 = {
+ lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
+ weight: [0.12, 0.34, 0.56]
+}
output0 = {output: [1, 1, 1, 0, 1, 1, 1, 0]}
Example((input0, output0))
-
-# Omit weight, since this is a sparse projection, for which the optional weight
-# input should be left unset.
-input1 = {lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
- hhash: [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765],
- weight: [],
- type_param: [1], # SPARSE
- }
-
-output1 = {output: [1,2,2,0]}
-
-Example((input1, output1))
diff --git a/runtime/test/specs/lsh_projection_2.mod.py b/runtime/test/specs/lsh_projection_2.mod.py
new file mode 100644
index 0000000..b39d8bb
--- /dev/null
+++ b/runtime/test/specs/lsh_projection_2.mod.py
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+num_input = 3
+num_hash = 4
+num_bits = 2
+
+model = Model()
+
+hhash = Parameter("hash", "TENSOR_FLOAT32", "{%d, %d}" % (num_hash, num_bits),
+ [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765])
+lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
+weight = Input("weight", "TENSOR_FLOAT32", "{%d}" % (num_input))
+type_param = Int32Scalar("type_param", 1) # SPARSE
+output = Output("output", "TENSOR_INT32", "{%d, %d}" % (num_hash, num_bits))
+model = model.Operation("LSH_PROJECTION", hhash, lookup, weight,
+ type_param).To(output)
+
+# Omit weight, since this is a sparse projection, for which the optional weight
+# input should be left unset.
+input0 = {
+ lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
+ weight: [],
+}
+
+output0 = {output: [1, 2, 2, 0]}
+
+Example((input0, output0))
diff --git a/runtime/test/specs/lsh_projection_weights_as_inputs.mod.py b/runtime/test/specs/lsh_projection_weights_as_inputs.mod.py
new file mode 100644
index 0000000..5a3eff1
--- /dev/null
+++ b/runtime/test/specs/lsh_projection_weights_as_inputs.mod.py
@@ -0,0 +1,50 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+num_input = 3
+num_hash = 4
+num_bits = 2
+
+model = Model()
+
+hhash = Input("hash", "TENSOR_FLOAT32", "{%d, %d}" % (num_hash, num_bits))
+lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
+weight = Input("weight", "TENSOR_FLOAT32", "{%d}" % (num_input))
+type_param = Input("type_param", "TENSOR_INT32", "{1}")
+output = Output("output", "TENSOR_INT32", "{%d, %d}" % (num_hash, num_bits))
+model = model.Operation("LSH_PROJECTION", hhash, lookup, weight, type_param).To(output)
+
+input0 = {lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
+ hhash: [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765],
+ weight: [0.12, 0.34, 0.56],
+ type_param: [2], # DENSE
+ }
+
+output0 = {output: [1, 1, 1, 0, 1, 1, 1, 0]}
+
+Example((input0, output0))
+
+# Omit weight, since this is a sparse projection, for which the optional weight
+# input should be left unset.
+input1 = {lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
+ hhash: [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765],
+ weight: [],
+ type_param: [1], # SPARSE
+ }
+
+output1 = {output: [1,2,2,0]}
+
+Example((input1, output1))
diff --git a/runtime/test/specs/reshape.mod.py b/runtime/test/specs/reshape.mod.py
index c64ac57..2a6cfc9 100644
--- a/runtime/test/specs/reshape.mod.py
+++ b/runtime/test/specs/reshape.mod.py
@@ -1,7 +1,7 @@
# model
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 3, 3}") # a line of 3 pixels, 3 components/pixel
-i2 = Input("op2", "TENSOR_INT32", "{1}") # another vector of 2 float32s
+i2 = Parameter("op2", "TENSOR_INT32", "{1}", [-1]) # target shape; -1 means infer this dimension
i3 = Output("op3", "TENSOR_FLOAT32", "{9}")
model = model.Operation("RESHAPE", i1, i2).To(i3)
@@ -9,9 +9,7 @@
input0 = {i1: # input 0
[1, 2, 3,
4, 5, 6,
- 7, 8, 9],
- i2: # input 1
- [-1]}
+ 7, 8, 9]}
output0 = {i3: # output 0
[1, 2, 3, 4, 5, 6, 7, 8, 9]}
diff --git a/runtime/test/specs/reshape_quant8.mod.py b/runtime/test/specs/reshape_quant8.mod.py
index f1f6814..a958641 100644
--- a/runtime/test/specs/reshape_quant8.mod.py
+++ b/runtime/test/specs/reshape_quant8.mod.py
@@ -2,7 +2,7 @@
model = Model()
# a line of 3 pixels, 3 components/pixel
i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 1, 3, 3}, 1.f, 0")
-i2 = Input("op2", "TENSOR_INT32", "{1}")
+i2 = Parameter("op2", "TENSOR_INT32", "{1}", [-1])
i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{9}, 1.f, 0")
model = model.Operation("RESHAPE", i1, i2).To(i3)
@@ -10,9 +10,7 @@
input0 = {i1: # input 0
[1, 2, 3,
4, 5, 6,
- 7, 8, 9],
- i2: # input 1
- [-1]}
+ 7, 8, 9]}
output0 = {i3: # output 0
[1, 2, 3, 4, 5, 6, 7, 8, 9]}
diff --git a/runtime/test/specs/reshape_quant8_weights_as_inputs.mod.py b/runtime/test/specs/reshape_quant8_weights_as_inputs.mod.py
new file mode 100644
index 0000000..f1f6814
--- /dev/null
+++ b/runtime/test/specs/reshape_quant8_weights_as_inputs.mod.py
@@ -0,0 +1,21 @@
+# model
+model = Model()
+# a line of 3 pixels, 3 components/pixel
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 1, 3, 3}, 1.f, 0")
+i2 = Input("op2", "TENSOR_INT32", "{1}")
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{9}, 1.f, 0")
+model = model.Operation("RESHAPE", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9],
+ i2: # input 1
+ [-1]}
+
+output0 = {i3: # output 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtime/test/specs/reshape_weights_as_inputs.mod.py b/runtime/test/specs/reshape_weights_as_inputs.mod.py
new file mode 100644
index 0000000..c64ac57
--- /dev/null
+++ b/runtime/test/specs/reshape_weights_as_inputs.mod.py
@@ -0,0 +1,20 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 3, 3}") # a line of 3 pixels, 3 components/pixel
+i2 = Input("op2", "TENSOR_INT32", "{1}") # another vector of 2 float32s
+i3 = Output("op3", "TENSOR_FLOAT32", "{9}")
+model = model.Operation("RESHAPE", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9],
+ i2: # input 1
+ [-1]}
+
+output0 = {i3: # output 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tools/test_generator/test_generator.py b/tools/test_generator/test_generator.py
index 087032f..0b6f8df 100755
--- a/tools/test_generator/test_generator.py
+++ b/tools/test_generator/test_generator.py
@@ -240,12 +240,13 @@
# Holds reference to all Inputs; used by Topoligcal sort as starting nodes.
__inputs = set()
- def __init__(self, name, vt, shape):
+ def __init__(self, name, vt, shape, increase_next_number=True):
Operand.__init__(self, name, Type(vt, shape))
Definitions.__init__(self)
Input.__inputs.add(self)
self.number = Input.__next_number
- Input.__next_number += 1
+ if increase_next_number:
+ Input.__next_number += 1
def lifetime(self):
return "MODEL_INPUT"
@@ -313,10 +314,18 @@
def lifetime(self):
return "CONSTANT_COPY"
+# Print in C float literal format
+def pretty_print_as_float(x):
+ s = str(float(x))
+ if s.find(".") >= 0 or s.find("e") >= 0:
+ return s + "f"
+ else:
+ return s + ".0f"
+
class Parameter(Input):
# TODO seems wrong that's an Input.
def __init__(self, name, vt, shape, initializer):
- Input.__init__(self, name, vt, shape)
+ Input.__init__(self, name, vt, shape, False)
self.initializer = initializer
self.cpptype = TypeLookup.get_cpptype(vt)
def is_internal(self):
@@ -325,7 +334,7 @@
init_name = self.get_name() + "_init"
initializer = [str(x) for x in self.initializer]
if self.cpptype == "float":
- initializer = [ x+"f" for x in initializer]
+ initializer = [ pretty_print_as_float(x) for x in initializer]
init = self.cpptype + " " + init_name + "[]"
init = "static " + init + " = {" + ", ".join(initializer) + "};"
args = [ self.get_name(), init_name,
@@ -629,14 +638,6 @@
return (args.model, args.example)
-# Print in C float literal format
-def pretty_print_as_float(x):
- s = str(float(x))
- if s.find(".") >= 0:
- return s + "f"
- else:
- return s + ".0f"
-
# Generate operands in VTS format
def generate_vts_operands():
# Dump operand definitions