Rename RaggedShape to DynamicRaggedShape.
Rename ragged_shape.py and ragged_shape_test.py accordingly.
PiperOrigin-RevId: 427899678
Change-Id: I45336202b7b5fbee658f80b711512dd31e159b43
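
Usage after this change (a minimal, illustrative sketch; the example tensor and
the printed repr below are assumptions for illustration, not part of this CL):

    # Before: from tensorflow.python.ops.ragged.ragged_shape import RaggedShape
    from tensorflow.python.ops.ragged.dynamic_ragged_shape import DynamicRaggedShape
    from tensorflow.python.ops.ragged import ragged_factory_ops

    rt = ragged_factory_ops.constant([[1, 2, 3], [4]])
    # A DynamicRaggedShape pairs row_partitions (the ragged structure) with an
    # inner_shape (the shape of rt.flat_values).
    shape = DynamicRaggedShape.from_tensor(rt)
    print(shape)  # e.g. <DynamicRaggedShape lengths=[2, (3, 1)] num_row_partitions=1>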
diff --git a/tensorflow/python/ops/ragged/BUILD b/tensorflow/python/ops/ragged/BUILD
index 19d2e6a..70658af 100644
--- a/tensorflow/python/ops/ragged/BUILD
+++ b/tensorflow/python/ops/ragged/BUILD
@@ -18,6 +18,7 @@
srcs_version = "PY3",
tags = ["nofixdeps"],
deps = [
+ ":dynamic_ragged_shape",
":ragged_array_ops",
":ragged_batch_gather_ops",
":ragged_batch_gather_with_default_op",
@@ -35,7 +36,6 @@
":ragged_math_ops",
":ragged_operators",
":ragged_ops",
- ":ragged_shape",
":ragged_string_ops",
":ragged_tensor",
":ragged_tensor_shape",
@@ -52,9 +52,9 @@
srcs = ["ragged_array_ops.py"],
srcs_version = "PY3",
deps = [
+ ":dynamic_ragged_shape",
":ragged_functional_ops",
":ragged_math_ops",
- ":ragged_shape",
":ragged_tensor",
":ragged_util",
":segment_id_ops",
@@ -250,6 +250,7 @@
srcs = ["ragged_ops.py"],
srcs_version = "PY3",
deps = [
+ ":dynamic_ragged_shape",
":ragged_array_ops",
":ragged_batch_gather_ops",
":ragged_batch_gather_with_default_op",
@@ -387,8 +388,8 @@
)
py_library(
- name = "ragged_shape",
- srcs = ["ragged_shape.py"],
+ name = "dynamic_ragged_shape",
+ srcs = ["dynamic_ragged_shape.py"],
srcs_version = "PY3",
deps = [
":ragged_tensor",
@@ -505,12 +506,12 @@
srcs = ["ragged_dispatch.py"],
srcs_version = "PY3",
deps = [
+ ":dynamic_ragged_shape", # fixdeps: keep
":ragged_array_ops", # fixdeps: keep
":ragged_batch_gather_ops", # fixdeps: keep
":ragged_concat_ops", # fixdeps: keep
":ragged_gather_ops", # fixdeps: keep
":ragged_math_ops", # fixdeps: keep
- ":ragged_shape", # fixdeps: keep
":ragged_squeeze_op", # fixdeps: keep
":ragged_tensor",
":ragged_tensor_shape",
@@ -1225,16 +1226,16 @@
)
py_test(
- name = "ragged_shape_test",
+ name = "dynamic_ragged_shape_test",
size = "medium",
- srcs = ["ragged_shape_test.py"],
+ srcs = ["dynamic_ragged_shape_test.py"],
python_version = "PY3",
shard_count = 8,
srcs_version = "PY3",
deps = [
+ ":dynamic_ragged_shape",
":ragged", # fixdeps: keep
":ragged_factory_ops",
- ":ragged_shape",
":ragged_tensor",
":row_partition",
"//tensorflow/python:array_ops",
diff --git a/tensorflow/python/ops/ragged/ragged_shape.py b/tensorflow/python/ops/ragged/dynamic_ragged_shape.py
similarity index 94%
rename from tensorflow/python/ops/ragged/ragged_shape.py
rename to tensorflow/python/ops/ragged/dynamic_ragged_shape.py
index 5d39335..19884fb 100644
--- a/tensorflow/python/ops/ragged/ragged_shape.py
+++ b/tensorflow/python/ops/ragged/dynamic_ragged_shape.py
@@ -44,11 +44,11 @@
# Allowing inner_shape might mean allowing inner_shape to be initialized by
# a fully defined TensorShape, or it might mean that you can actually store
# TensorShape in the inner_shape field. This could conceivably construct
-# a RaggedShape that was dtype agnostic.
+# a DynamicRaggedShape that was dtype agnostic.
#
# TODO(martinz): unify the impl of the determination of index type across
-# RowPartition and RaggedShape.
-class RaggedShape:
+# RowPartition and DynamicRaggedShape.
+class DynamicRaggedShape:
"""The shape of a ragged or dense tensor.
Ragged shapes are encoded using two fields:
@@ -57,12 +57,12 @@
* `row_partitions`: A list of `RowPartition` objects, describing how
that flat shape should be partitioned to add ragged axes.
- If a RaggedShape is the shape of a RaggedTensor rt, then:
+ If a DynamicRaggedShape is the shape of a RaggedTensor rt, then:
1. row_partitions = rt._nested_row_partitions
(and thus len(row_partitions) > 0)
2. inner_shape is the shape of rt.flat_values
- If a RaggedShape is the shape of a dense tensor t, then:
+ If a DynamicRaggedShape is the shape of a dense tensor t, then:
1. row_partitions = []
2. inner_shape is the shape of t.
@@ -80,10 +80,10 @@
"""
def __init__(self, row_partitions, inner_shape, dtype=None, validate=False):
- """Core constructor for a RaggedShape.
+ """Core constructor for a DynamicRaggedShape.
- Create a RaggedShape. This can be used to construct a
- RaggedShape representing a ragged or dense shape. If row_partitions
+ Create a DynamicRaggedShape. This can be used to construct a
+ DynamicRaggedShape representing a ragged or dense shape. If row_partitions
is an empty list, then this is equivalent to a dense shape.
If row_partitions is specified, then the num_row_partitions will be equal
@@ -133,7 +133,7 @@
for axis, row_partition in enumerate(self._row_partitions):
if axis > 0:
previous_row_partition = self._row_partitions[axis - 1]
- msg = ("RowPartitions in RaggedShape do not align "
+ msg = ("RowPartitions in DynamicRaggedShape do not align "
f"between {axis - 1} and {axis}")
static_nrows = row_partition.static_nrows
static_nvals = previous_row_partition.static_nvals
@@ -236,7 +236,7 @@
dtype: the dtype of the shape (tf.int32 or tf.int64).
Returns:
- a new RaggedShape
+ a new DynamicRaggedShape
"""
if not isinstance(lengths, list):
raise ValueError("lengths should be a list")
@@ -261,7 +261,7 @@
if not lengths:
if num_row_partitions > 0:
raise ValueError("num_row_partitions==0 for a scalar shape")
- return RaggedShape([], [], dtype=dtype)
+ return DynamicRaggedShape([], [], dtype=dtype)
if not num_row_partitions < len(lengths):
raise ValueError(
@@ -273,9 +273,9 @@
(row_partitions, nvals) = _to_row_partitions_and_nvals_from_lengths(
lengths[:num_row_partitions + 1])
inner_shape = [nvals] + lengths[num_row_partitions + 1:]
- return RaggedShape(row_partitions, inner_shape, dtype=dtype)
+ return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype)
else:
- return RaggedShape([], lengths, dtype=dtype)
+ return DynamicRaggedShape([], lengths, dtype=dtype)
@classmethod
def from_row_partitions(cls, row_partitions, dtype=None):
@@ -286,27 +286,28 @@
dtype: the dtype to use, or None to use the row_partitions dtype.
Returns:
- a RaggedShape with inner_rank==1.
+ a DynamicRaggedShape with inner_rank==1.
"""
if not row_partitions:
raise ValueError("row_partitions cannot be empty")
inner_shape = [row_partitions[-1].nvals()]
- return RaggedShape(row_partitions, inner_shape, dtype=dtype)
+ return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype)
@classmethod
def _from_inner_shape(cls, inner_shape, dtype=None):
"""Create a shape from inner_shape, where num_row_partitions == 0."""
- return RaggedShape([], inner_shape, dtype=dtype)
+ return DynamicRaggedShape([], inner_shape, dtype=dtype)
# pylint: disable=protected-access
@classmethod
def from_tensor(cls, t, dtype=None):
"""Constructs a ragged shape for a potentially ragged tensor."""
if ragged_tensor.is_ragged(t):
- return RaggedShape(t._nested_row_partitions, _flat_values_shape(t),
- dtype=dtype)
+ return DynamicRaggedShape(
+ t._nested_row_partitions, _flat_values_shape(t), dtype=dtype)
else:
- return RaggedShape._from_inner_shape(array_ops.shape(t), dtype=dtype)
+ return DynamicRaggedShape._from_inner_shape(
+ array_ops.shape(t), dtype=dtype)
@property
def row_partitions(self):
@@ -371,7 +372,7 @@
def __repr__(self):
lengths = _list_with_ellipsis_to_str(self.static_lengths())
- return ("<RaggedShape "
+ return ("<DynamicRaggedShape "
"lengths=%s num_row_partitions=%r>" %
(lengths, self.num_row_partitions))
@@ -397,22 +398,23 @@
stop: the last dimension (exclusive). 0 <= stop <= rank
"""
if stop <= start:
- return RaggedShape._from_inner_shape([])
+ return DynamicRaggedShape._from_inner_shape([])
elif start == 0:
if stop <= self.num_row_partitions:
if stop == 1:
- return RaggedShape._from_inner_shape([self.row_partitions[0].nrows()])
+ return DynamicRaggedShape._from_inner_shape(
+ [self.row_partitions[0].nrows()])
new_row_partitions = self.row_partitions[:stop - 1]
new_inner_shape = [new_row_partitions[-1].nvals()]
- return RaggedShape(new_row_partitions, new_inner_shape)
+ return DynamicRaggedShape(new_row_partitions, new_inner_shape)
else:
if self.rank <= stop:
return self
if self.num_row_partitions == 0:
- return RaggedShape._from_inner_shape(self.inner_shape[:stop])
+ return DynamicRaggedShape._from_inner_shape(self.inner_shape[:stop])
else:
new_inner_shape = self.inner_shape[:stop - self.num_row_partitions]
- return RaggedShape(self.row_partitions, new_inner_shape)
+ return DynamicRaggedShape(self.row_partitions, new_inner_shape)
else:
if stop < self.rank:
partial = self._slice_shape(0, stop)
@@ -422,7 +424,7 @@
if not x.is_uniform():
raise ValueError("All relevant dimensions must be uniform")
- return RaggedShape._from_inner_shape(
+ return DynamicRaggedShape._from_inner_shape(
partial._with_num_row_partitions(0).inner_shape[start:])
def _dimension(self, index):
@@ -524,7 +526,7 @@
Effectively, this is self[:axis+1]._num_elements()
Example:
- shape = RaggedShape._from_inner_shape([2, 3, 4])
+ shape = DynamicRaggedShape._from_inner_shape([2, 3, 4])
shape._num_slices_in_dimension(0) = 2
shape._num_slices_in_dimension(1) = 6
shape._num_slices_in_dimension(2) = 24
@@ -722,19 +724,21 @@
row_length, nrows=nrows, dtype=self.dtype)
more_rp.append(rp)
alt_inner = self._alt_inner_shape(new_inner_rank)
- return RaggedShape(
+ return DynamicRaggedShape(
list(self.row_partitions) + more_rp, alt_inner)
else:
assert num_row_partitions < self.num_row_partitions
- return RaggedShape(self.row_partitions[:num_row_partitions],
- self._alt_inner_shape(self.rank - num_row_partitions))
+ return DynamicRaggedShape(
+ self.row_partitions[:num_row_partitions],
+ self._alt_inner_shape(self.rank - num_row_partitions))
def with_dtype(self, dtype):
"""Change the dtype of the shape."""
if dtype == self.dtype:
return self
else:
- return RaggedShape(self.row_partitions, self.inner_shape, dtype=dtype)
+ return DynamicRaggedShape(
+ self.row_partitions, self.inner_shape, dtype=dtype)
def _as_row_partitions(self):
"""Returns row partitions representing this shape.
@@ -810,8 +814,8 @@
return flat_values
-def broadcast_dynamic_shape(shape_x: RaggedShape,
- shape_y: RaggedShape) -> RaggedShape:
+def broadcast_dynamic_shape(shape_x: DynamicRaggedShape,
+ shape_y: DynamicRaggedShape) -> DynamicRaggedShape:
"""Returns the shape formed by broadcasting two shapes to be compatible.
1. If shape_x and shape_y both have row_partitions, then fail if their dtypes
@@ -821,23 +825,23 @@
3. If one has row_partitions, go with that dtype.
Args:
- shape_x: A `RaggedShape`
- shape_y: A `RaggedShape`
+ shape_x: A `DynamicRaggedShape`
+ shape_y: A `DynamicRaggedShape`
Returns:
- A `RaggedShape`.
+ A `DynamicRaggedShape`.
Raises:
ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.
"""
- if not isinstance(shape_x, RaggedShape):
- raise TypeError("shape_x must be a RaggedShape")
- if not isinstance(shape_y, RaggedShape):
- raise TypeError("shape_y must be a RaggedShape")
+ if not isinstance(shape_x, DynamicRaggedShape):
+ raise TypeError("shape_x must be a DynamicRaggedShape")
+ if not isinstance(shape_y, DynamicRaggedShape):
+ raise TypeError("shape_y must be a DynamicRaggedShape")
return broadcast_dynamic_shape_extended(shape_x, shape_y)[0]
-def broadcast_to(rt_input, shape: RaggedShape):
+def broadcast_to(rt_input, shape: DynamicRaggedShape):
"""Broadcasts a potentially ragged tensor to a ragged shape.
Tiles `rt_input` as necessary to match the given shape.
@@ -846,14 +850,14 @@
Args:
rt_input: The potentially ragged tensor to broadcast.
- shape: A `RaggedShape`
+ shape: A `DynamicRaggedShape`
Returns:
A potentially ragged tensor whose values are taken from
`rt_input`, and whose shape matches `shape`.
"""
- if not isinstance(shape, RaggedShape):
- raise TypeError("shape must be a RaggedShape")
+ if not isinstance(shape, DynamicRaggedShape):
+ raise TypeError("shape must be a DynamicRaggedShape")
rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)
origin_shape = None
if ragged_tensor.is_ragged(rt_input):
@@ -862,12 +866,13 @@
raise ValueError("Cannot coerce row_splits.dtype")
else:
shape = shape.with_dtype(rt_input.row_splits.dtype)
- origin_shape = RaggedShape.from_tensor(rt_input)
+ origin_shape = DynamicRaggedShape.from_tensor(rt_input)
else:
if shape.num_row_partitions != 0:
- origin_shape = RaggedShape.from_tensor(rt_input, dtype=shape.dtype)
+ origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=shape.dtype)
else:
- origin_shape = RaggedShape.from_tensor(rt_input, dtype=dtypes.int64)
+ origin_shape = DynamicRaggedShape.from_tensor(rt_input,
+ dtype=dtypes.int64)
shape = shape.with_dtype(dtype=dtypes.int64)
broadcaster = _get_broadcaster(origin_shape, shape)
@@ -875,8 +880,8 @@
def broadcast_dynamic_shape_extended(
- a: RaggedShape,
- b: RaggedShape): # -> Tuple[RaggedShape, _Broadcaster, _Broadcaster]
+ a: DynamicRaggedShape, b: DynamicRaggedShape
+): # -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]
"""Gets the smallest shape to which a and b can broadcast.
In order to create the smallest shape, one must also do most of the
@@ -892,8 +897,8 @@
return (c, ac, bc)
Args:
- a: a RaggedShape
- b: a RaggedShape
+ a: a DynamicRaggedShape
+ b: a DynamicRaggedShape
Returns:
A triple of a shape and two broadcasters.
@@ -919,7 +924,7 @@
elif a.rank == 1 and b.rank == 1:
[a_layer, b_layer,
target] = _broadcast_dynamic_shape_one_layer(a.inner_shape, b.inner_shape)
- target_shape = RaggedShape._from_inner_shape(target) # pylint: disable=protected-access
+ target_shape = DynamicRaggedShape._from_inner_shape(target) # pylint: disable=protected-access
return (target_shape, _Broadcaster(a, target_shape, [a_layer]),
_Broadcaster(b, target_shape, [b_layer]))
@@ -955,8 +960,8 @@
if ((x_is_ragged and y_is_ragged) or
(x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or
(y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)):
- shape_x = RaggedShape.from_tensor(x)
- shape_y = RaggedShape.from_tensor(y)
+ shape_x = DynamicRaggedShape.from_tensor(x)
+ shape_y = DynamicRaggedShape.from_tensor(y)
if shape_x.dtype != shape_y.dtype:
if not x_is_ragged:
shape_x = shape_x.with_dtype(shape_y.dtype)
@@ -1256,18 +1261,18 @@
Note: source_shape.rank and target_shape.rank must be known.
Args:
- source_shape: the source RaggedShape
- target_shape: the target RaggedShape
+ source_shape: the source DynamicRaggedShape
+ target_shape: the target DynamicRaggedShape
layer_broadcasters: List[_LayerBroadcaster] of length source_shape.rank.
dtype: the preferred dtype of the broadcaster.
Raises:
TypeError: if the input types don't match.
"""
- if not isinstance(source_shape, RaggedShape):
- raise TypeError("source_shape is not a RaggedShape")
- if not isinstance(target_shape, RaggedShape):
- raise TypeError("target_shape is not a RaggedShape")
+ if not isinstance(source_shape, DynamicRaggedShape):
+ raise TypeError("source_shape is not a DynamicRaggedShape")
+ if not isinstance(target_shape, DynamicRaggedShape):
+ raise TypeError("target_shape is not a DynamicRaggedShape")
if not isinstance(layer_broadcasters, list):
raise TypeError("layer_broadcasters not a list: " +
str(layer_broadcasters))
@@ -1981,7 +1986,8 @@
return ([], [], [])
-def _get_broadcast_num_row_partitions(a: RaggedShape, b: RaggedShape):
+def _get_broadcast_num_row_partitions(a: DynamicRaggedShape,
+ b: DynamicRaggedShape):
"""Returns broadcast_dynamic_shape(a, b).num_row_partitions."""
# Assumes rank and num_row_partitions are not None.
if (a.num_row_partitions == 0 and b.num_row_partitions == 0):
@@ -2000,10 +2006,10 @@
# pylint: disable=protected-access
def _broadcast_dynamic_shape_extended_complete(
- a: RaggedShape, b: RaggedShape, b_rps: Sequence[RowPartition],
+ a: DynamicRaggedShape, b: DynamicRaggedShape, b_rps: Sequence[RowPartition],
c_suffix: Sequence[RowPartition], ac: Sequence[_LayerBroadcaster],
bc_suffix: Sequence[_LayerBroadcaster]
-) -> Tuple[RaggedShape, _Broadcaster, _Broadcaster]:
+) -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]:
"""Helper for broadcast_dynamic_shape_extended."""
c_prefix = b_rps[:-len(c_suffix)]
bc_prefix_length = b.rank - len(bc_suffix)
@@ -2013,14 +2019,14 @@
]
c_num_row_partitions = _get_broadcast_num_row_partitions(a, b)
- c_raw = RaggedShape.from_row_partitions(c_prefix + tuple(c_suffix))
+ c_raw = DynamicRaggedShape.from_row_partitions(c_prefix + tuple(c_suffix))
c = c_raw._with_num_row_partitions(c_num_row_partitions)
return (c, _Broadcaster(a, c, ac), _Broadcaster(b, c, bc_prefix + bc_suffix))
def _broadcast_dynamic_shape_extended_helper(
- a: RaggedShape,
- b: RaggedShape) -> Tuple[RaggedShape, _Broadcaster, _Broadcaster]:
+ a: DynamicRaggedShape, b: DynamicRaggedShape
+) -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]:
"""Helper for broadcast_dynamic_shape_extended.
Here, we force:
@@ -2028,8 +2034,8 @@
2 <= b.rank
1 <= a.rank
Args:
- a: a RaggedShape
- b: a RaggedShape
+ a: a DynamicRaggedShape
+ b: a DynamicRaggedShape
Returns:
A triple of a shape and two broadcasters.
@@ -2286,8 +2292,7 @@
new_inner_rank_tail_length = new_inner_rank - 1
inner_shape_tail = shape[-new_inner_rank_tail_length:].as_list()
first_dim = shape[:-new_inner_rank_tail_length].num_elements()
- return constant_op.constant([first_dim] + inner_shape_tail,
- dtype=dtype)
+ return constant_op.constant([first_dim] + inner_shape_tail, dtype=dtype)
# TODO(b/218932570)
diff --git a/tensorflow/python/ops/ragged/ragged_shape_test.py b/tensorflow/python/ops/ragged/dynamic_ragged_shape_test.py
similarity index 86%
rename from tensorflow/python/ops/ragged/ragged_shape_test.py
rename to tensorflow/python/ops/ragged/dynamic_ragged_shape_test.py
index 74f605c..54d63ba 100644
--- a/tensorflow/python/ops/ragged/ragged_shape_test.py
+++ b/tensorflow/python/ops/ragged/dynamic_ragged_shape_test.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Tests for tf.ragged.ragged_shape."""
+"""Tests for tf.ragged.dynamic_ragged_shape."""
from typing import Sequence, Union
@@ -35,12 +35,12 @@
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
+from tensorflow.python.ops.ragged import dynamic_ragged_shape
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
-from tensorflow.python.ops.ragged import ragged_shape
from tensorflow.python.ops.ragged import ragged_tensor
-from tensorflow.python.ops.ragged.ragged_shape import _LayerBroadcaster
-from tensorflow.python.ops.ragged.ragged_shape import RaggedShape
+from tensorflow.python.ops.ragged.dynamic_ragged_shape import _LayerBroadcaster
+from tensorflow.python.ops.ragged.dynamic_ragged_shape import DynamicRaggedShape
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.ops.ragged.row_partition import RowPartition
from tensorflow.python.platform import googletest
@@ -60,7 +60,7 @@
a sequence of RowPartitions.
"""
(result,
- _) = ragged_shape._to_row_partitions_and_nvals_from_lengths(lengths)
+ _) = dynamic_ragged_shape._to_row_partitions_and_nvals_from_lengths(lengths)
return result
@@ -110,20 +110,21 @@
def _num_elements_of_lengths(lengths: Sequence[Union[int, Sequence[int]]]):
- """Static version of RaggedShape.from_lengths(lengths)._num_elements()."""
+ """Static version of DynamicRaggedShape.from_lengths(lengths)._num_elements()."""
return _num_elements_of_lengths_with_rows(1, lengths)
def _to_prime_tensor_from_lengths(
lengths: Sequence[Union[int, Sequence[int]]]) -> RaggedTensor:
"""Create a tensor of primes with the shape specified."""
- shape = RaggedShape.from_lengths(lengths)
+ shape = DynamicRaggedShape.from_lengths(lengths)
num_elements = _num_elements_of_lengths(lengths)
return ragged_array_ops.ragged_reshape(_lowest_primes(num_elements), shape)
@test_util.run_all_in_graph_and_eager_modes
-class RaggedShapeTest(test_util.TensorFlowTestCase, parameterized.TestCase):
+class DynamicRaggedShapeTest(test_util.TensorFlowTestCase,
+ parameterized.TestCase):
def assertRowPartitionEq(self,
x: RowPartition,
@@ -131,9 +132,12 @@
msg=None) -> None:
self.assertAllEqual(x.row_splits(), y.row_splits(), msg=msg)
- def assertShapeEq(self, x: RaggedShape, y: RaggedShape, msg=None) -> None:
- assert isinstance(x, RaggedShape)
- assert isinstance(y, RaggedShape)
+ def assertShapeEq(self,
+ x: DynamicRaggedShape,
+ y: DynamicRaggedShape,
+ msg=None) -> None:
+ assert isinstance(x, DynamicRaggedShape)
+ assert isinstance(y, DynamicRaggedShape)
if msg is None:
msg = ''
self.assertLen(
@@ -152,10 +156,10 @@
assert isinstance(y, _LayerBroadcaster)
self.assertAllEqual(x.gather_index, y.gather_index)
- def assertBroadcasterEq(self, x: ragged_shape._Broadcaster,
- y: ragged_shape._Broadcaster) -> None:
- assert isinstance(x, ragged_shape._Broadcaster)
- assert isinstance(y, ragged_shape._Broadcaster)
+ def assertBroadcasterEq(self, x: dynamic_ragged_shape._Broadcaster,
+ y: dynamic_ragged_shape._Broadcaster) -> None:
+ assert isinstance(x, dynamic_ragged_shape._Broadcaster)
+ assert isinstance(y, dynamic_ragged_shape._Broadcaster)
self.assertShapeEq(x.source_shape, y.source_shape)
self.assertShapeEq(x.target_shape, y.target_shape)
self.assertLen(x._layer_broadcasters, len(y._layer_broadcasters))
@@ -209,9 +213,9 @@
inner_shape=[5]),
])
def testFromTensor(self, value, row_partitions, inner_shape):
- shape = RaggedShape.from_tensor(value)
+ shape = DynamicRaggedShape.from_tensor(value)
row_partitions = [RowPartition.from_row_splits(x) for x in row_partitions]
- expected = RaggedShape(row_partitions, inner_shape)
+ expected = DynamicRaggedShape(row_partitions, inner_shape)
self.assertShapeEq(shape, expected)
# pylint:disable=g-long-lambda
@@ -286,9 +290,9 @@
num_row_partitions=None):
if callable(row_partitions):
row_partitions = row_partitions()
- shape = RaggedShape.from_lengths(
+ shape = DynamicRaggedShape.from_lengths(
lengths, num_row_partitions=num_row_partitions)
- expected = RaggedShape(row_partitions, inner_shape)
+ expected = DynamicRaggedShape(row_partitions, inner_shape)
self.assertShapeEq(shape, expected)
@parameterized.parameters([
@@ -320,26 +324,27 @@
])
def testFromLengthsError(self, lengths, msg, num_row_partitions=None):
with self.assertRaisesRegex(ValueError, msg):
- RaggedShape.from_lengths(lengths, num_row_partitions=num_row_partitions)
+ DynamicRaggedShape.from_lengths(
+ lengths, num_row_partitions=num_row_partitions)
def testGetBroadcaster(self):
- origin_shape = RaggedShape([RowPartition.from_uniform_row_length(1, 3)],
- inner_shape=[3])
- dest_shape = RaggedShape([RowPartition.from_uniform_row_length(2, 6)],
- inner_shape=[6])
- actual = ragged_shape._get_broadcaster(origin_shape, dest_shape)
- expected = ragged_shape._Broadcaster(origin_shape, dest_shape, [
+ origin_shape = DynamicRaggedShape(
+ [RowPartition.from_uniform_row_length(1, 3)], inner_shape=[3])
+ dest_shape = DynamicRaggedShape(
+ [RowPartition.from_uniform_row_length(2, 6)], inner_shape=[6])
+ actual = dynamic_ragged_shape._get_broadcaster(origin_shape, dest_shape)
+ expected = dynamic_ragged_shape._Broadcaster(origin_shape, dest_shape, [
_LayerBroadcaster.from_gather_index([0, 1, 2]),
_LayerBroadcaster.from_gather_index([0, 0, 1, 1, 2, 2])
])
self.assertBroadcasterEq(actual, expected)
def testGetBroadcaster2(self):
- origin_shape = RaggedShape([], inner_shape=[])
- dest_shape = RaggedShape([RowPartition.from_row_splits([0, 2, 3])],
- inner_shape=[3])
- actual = ragged_shape._get_broadcaster(origin_shape, dest_shape)
- expected = ragged_shape._Broadcaster(origin_shape, dest_shape, [])
+ origin_shape = DynamicRaggedShape([], inner_shape=[])
+ dest_shape = DynamicRaggedShape([RowPartition.from_row_splits([0, 2, 3])],
+ inner_shape=[3])
+ actual = dynamic_ragged_shape._get_broadcaster(origin_shape, dest_shape)
+ expected = dynamic_ragged_shape._Broadcaster(origin_shape, dest_shape, [])
self.assertBroadcasterEq(actual, expected)
@parameterized.parameters([
@@ -361,7 +366,7 @@
dict(lengths=[2, (2, 3), 7], axis=-3, expected=2),
])
def testNumSlicesInDimension(self, lengths, axis, expected):
- original = RaggedShape.from_lengths(lengths)
+ original = DynamicRaggedShape.from_lengths(lengths)
actual = original._num_slices_in_dimension(axis)
self.assertAllEqual(expected, actual)
@@ -374,7 +379,7 @@
])
def testNumSlicesInDimensionRaises(self, lengths, axis, error_type,
error_regex):
- original = RaggedShape.from_lengths(lengths)
+ original = DynamicRaggedShape.from_lengths(lengths)
with self.assertRaisesRegex(error_type, error_regex):
original._num_slices_in_dimension(axis)
@@ -397,7 +402,7 @@
])
def testAltInnerShapeRaises(self, lengths, new_dense_rank, error_type,
error_regex):
- original = RaggedShape.from_lengths(lengths)
+ original = DynamicRaggedShape.from_lengths(lengths)
with self.assertRaisesRegex(error_type, error_regex):
original._alt_inner_shape(new_dense_rank)
@@ -407,7 +412,7 @@
4]),
])
def testAltInnerShape(self, lengths, new_dense_rank, expected_inner_shape):
- original = RaggedShape.from_lengths(lengths)
+ original = DynamicRaggedShape.from_lengths(lengths)
actual = original._alt_inner_shape(new_dense_rank)
self.assertAllEqual(actual, expected_inner_shape)
@@ -415,12 +420,12 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec([3], dtypes.int64)])
def fun(x):
- shape = RaggedShape([
+ shape = DynamicRaggedShape([
RowPartition.from_row_lengths([1, 3], dtype=dtypes.int64),
RowPartition.from_row_lengths([2, 3, 4, 5], dtype=dtypes.int64)
], x)
result = shape._with_num_row_partitions(3)
- expected = RaggedShape([
+ expected = DynamicRaggedShape([
RowPartition.from_row_lengths([1, 3], dtype=dtypes.int64),
RowPartition.from_row_lengths([2, 3, 4, 5], dtype=dtypes.int64),
RowPartition.from_uniform_row_length(
@@ -438,7 +443,7 @@
])
def testWithDenseRankRaises(self, lengths, new_dense_rank, error_type,
error_regex):
- original = RaggedShape.from_lengths(lengths)
+ original = DynamicRaggedShape.from_lengths(lengths)
with self.assertRaisesRegex(error_type, error_regex):
original.with_inner_rank(new_dense_rank)
@@ -461,12 +466,12 @@
])
def testWithNumRowPartitionsRaises(self, lengths, num_row_partitions,
error_type, error_regex):
- original = RaggedShape.from_lengths(lengths)
+ original = DynamicRaggedShape.from_lengths(lengths)
with self.assertRaisesRegex(error_type, error_regex):
original._with_num_row_partitions(num_row_partitions)
def testDimensionRaises(self):
- original = RaggedShape.from_lengths([2, (1, 2)])
+ original = DynamicRaggedShape.from_lengths([2, (1, 2)])
with self.assertRaisesRegex(TypeError, 'index should be an int'):
# This error is not exposed directly to the end user.
original._dimension(0.5)
@@ -525,10 +530,10 @@
s,
expected_lengths,
num_row_partitions=None):
- original = RaggedShape.from_lengths(lengths)
+ original = DynamicRaggedShape.from_lengths(lengths)
if num_row_partitions is not None:
original = original._with_num_row_partitions(num_row_partitions)
- expected = RaggedShape.from_lengths(expected_lengths)
+ expected = DynamicRaggedShape.from_lengths(expected_lengths)
actual = original[s]
self.assertShapeEq(expected, actual)
@@ -567,7 +572,7 @@
error_type,
error_regex,
num_row_partitions=None):
- original = RaggedShape.from_lengths(lengths)
+ original = DynamicRaggedShape.from_lengths(lengths)
if num_row_partitions is not None:
original = original._with_num_row_partitions(num_row_partitions)
with self.assertRaisesRegex(error_type, error_regex):
@@ -578,8 +583,8 @@
uniform_row_length=1)
expected = RaggedTensor.from_uniform_row_length([3, 3, 4, 4, 5, 5],
uniform_row_length=2)
- expected_shape = RaggedShape.from_tensor(expected)
- actual = ragged_shape.broadcast_to(origin, expected_shape)
+ expected_shape = DynamicRaggedShape.from_tensor(expected)
+ actual = dynamic_ragged_shape.broadcast_to(origin, expected_shape)
self.assertAllEqual(actual, expected)
@parameterized.parameters([
@@ -611,19 +616,19 @@
expected_gather_indices,
source_num_row_partitions=None,
target_num_row_partitions=None):
- source = RaggedShape.from_lengths(source_lengths)
+ source = DynamicRaggedShape.from_lengths(source_lengths)
if source_num_row_partitions is not None:
source = source._with_num_row_partitions(source_num_row_partitions)
- target = RaggedShape.from_lengths(target_lengths)
+ target = DynamicRaggedShape.from_lengths(target_lengths)
if target_num_row_partitions is not None:
target = target._with_num_row_partitions(target_num_row_partitions)
expected_gather_indices = [
_LayerBroadcaster.from_gather_index(x) for x in expected_gather_indices
]
- actual = ragged_shape._get_broadcaster(source, target)
- expected = ragged_shape._Broadcaster(source, target,
- expected_gather_indices)
+ actual = dynamic_ragged_shape._get_broadcaster(source, target)
+ expected = dynamic_ragged_shape._Broadcaster(source, target,
+ expected_gather_indices)
self.assertBroadcasterEq(actual, expected)
def testRaggedGradientSimple1(self):
@@ -693,10 +698,10 @@
if context.executing_eagerly():
return
def func(x):
- target_shape = RaggedShape.from_row_partitions(
+ target_shape = DynamicRaggedShape.from_row_partitions(
[RowPartition.from_row_splits(row_splits=[0, 4, 7, 8])])
- rt = ragged_shape.broadcast_to(x, target_shape)
+ rt = dynamic_ragged_shape.broadcast_to(x, target_shape)
return rt.flat_values
x = constant_op.constant([[3.0], [1.0], [4.0]])
@@ -708,8 +713,8 @@
def testBroadcastScalarToScalar(self):
origin = constant_op.constant(b'x')
expected = origin
- expected_shape = RaggedShape.from_tensor(expected)
- actual = ragged_shape.broadcast_to(origin, expected_shape)
+ expected_shape = DynamicRaggedShape.from_tensor(expected)
+ actual = dynamic_ragged_shape.broadcast_to(origin, expected_shape)
self.assertAllEqual(actual, expected)
@parameterized.parameters([
@@ -720,7 +725,7 @@
dict(lengths=[2, (2, 3), 7, 4], num_row_partitions=2, axis=3),
])
def testIsUniformTrue(self, lengths, axis, num_row_partitions=None):
- shape = RaggedShape.from_lengths(lengths)
+ shape = DynamicRaggedShape.from_lengths(lengths)
if num_row_partitions is not None:
shape = shape._with_num_row_partitions(num_row_partitions)
actual = shape.is_uniform(axis)
@@ -734,7 +739,7 @@
axis=3),
])
def testIsUniformFalse(self, lengths, num_row_partitions, axis):
- shape = RaggedShape.from_lengths(lengths)._with_num_row_partitions(
+ shape = DynamicRaggedShape.from_lengths(lengths)._with_num_row_partitions(
num_row_partitions)
actual = shape.is_uniform(axis)
self.assertFalse(actual)
@@ -761,7 +766,7 @@
])
def testIsUniformRaises(self, lengths, num_row_partitions, axis, error_type,
error_regex):
- shape = RaggedShape.from_lengths(lengths)._with_num_row_partitions(
+ shape = DynamicRaggedShape.from_lengths(lengths)._with_num_row_partitions(
num_row_partitions)
with self.assertRaisesRegex(error_type, error_regex):
shape.is_uniform(axis)
@@ -779,7 +784,7 @@
])
def testWithNumRowPartitions(self, lengths, num_row_partitions_a,
num_row_partitions_b):
- shape = RaggedShape.from_lengths(lengths)
+ shape = DynamicRaggedShape.from_lengths(lengths)
original_row_partitions = shape.num_row_partitions
shape_a = shape._with_num_row_partitions(num_row_partitions_a)
self.assertEqual(shape_a.num_row_partitions, num_row_partitions_a)
@@ -805,14 +810,15 @@
dict(lengths=[3, 4, 5], axis=2, expected=5),
])
def testGetItem(self, lengths, axis, expected, num_row_partitions=None):
- shape = RaggedShape.from_lengths(lengths)
+ shape = DynamicRaggedShape.from_lengths(lengths)
if num_row_partitions is not None:
shape = shape._with_num_row_partitions(num_row_partitions)
actual = shape[axis]
self.assertAllEqual(actual, expected)
def testNumElements(self):
- shape = RaggedShape.from_lengths([2, 3, 4, 5])._with_num_row_partitions(2)
+ shape = DynamicRaggedShape.from_lengths([2, 3, 4,
+ 5])._with_num_row_partitions(2)
self.assertAllEqual(shape._num_elements(), 120)
def test_to_row_partitions_from_lengths(self):
@@ -856,12 +862,12 @@
expected,
expected_num_row_partitions=None):
origin = constant_op.constant(origin)
- expected_shape = RaggedShape.from_lengths(expected_lengths)
+ expected_shape = DynamicRaggedShape.from_lengths(expected_lengths)
if expected_num_row_partitions is not None:
expected_shape = expected_shape._with_num_row_partitions(
expected_num_row_partitions)
expected = ragged_factory_ops.constant_value(expected)
- actual = ragged_shape.broadcast_to(origin, expected_shape)
+ actual = dynamic_ragged_shape.broadcast_to(origin, expected_shape)
self.assertAllEqual(actual, expected)
def testBroadcastFlatValues(self):
@@ -871,12 +877,13 @@
b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l',
b'm', b'n', b'o', b'p'
])
- origin_shape = RaggedShape.from_lengths(
+ origin_shape = DynamicRaggedShape.from_lengths(
origin_lengths)._with_num_row_partitions(3)
- dest_shape = RaggedShape.from_lengths(
+ dest_shape = DynamicRaggedShape.from_lengths(
dest_lengths)._with_num_row_partitions(5)
- broadcaster = ragged_shape._get_broadcaster(origin_shape, dest_shape)
+ broadcaster = dynamic_ragged_shape._get_broadcaster(origin_shape,
+ dest_shape)
actual = broadcaster.broadcast_flat_values(origin_values)
@@ -960,17 +967,17 @@
expected_lengths, expected_values):
origin = _to_ragged_tensor_from_lengths(origin_values, origin_lengths)
expected = _to_ragged_tensor_from_lengths(expected_values, expected_lengths)
- expected_shape = RaggedShape.from_tensor(expected)
- actual = ragged_shape.broadcast_to(origin, expected_shape)
+ expected_shape = DynamicRaggedShape.from_tensor(expected)
+ actual = dynamic_ragged_shape.broadcast_to(origin, expected_shape)
self.assertAllEqual(actual, expected)
- def testRaggedShapeFromTensor2(self):
+ def testDynamicRaggedShapeFromTensor2(self):
raw_rt = [[[[7, 4], [5, 6]], [[1, 2], [3, 7]]], [[[7, 4], [5, 6]]],
[[[1, 2], [3, 7]]]]
raw_rt = ragged_factory_ops.constant_value(raw_rt)
- actual_shape = RaggedShape.from_tensor(raw_rt)
- expected_shape = RaggedShape.from_lengths([3, (2, 1, 1), 2,
- 2])._with_num_row_partitions(3)
+ actual_shape = DynamicRaggedShape.from_tensor(raw_rt)
+ expected_shape = DynamicRaggedShape.from_lengths(
+ [3, (2, 1, 1), 2, 2])._with_num_row_partitions(3)
self.assertShapeEq(actual_shape, expected_shape)
# pylint: disable=g-long-lambda
@@ -996,7 +1003,7 @@
inner_shape=lambda: [3],
validate=False,
error_type=ValueError,
- error_regex='RowPartitions in RaggedShape do not'),
+ error_regex='RowPartitions in DynamicRaggedShape do not'),
# nvals and inner_shape[0] don't match (3 != 6) statically
dict(
row_partitions=lambda: [
@@ -1017,7 +1024,8 @@
row_partitions = row_partitions()
inner_shape = inner_shape()
with self.assertRaisesRegex(error_type, error_regex):
- RaggedShape(row_partitions, inner_shape, dtype=dtype, validate=validate)
+ DynamicRaggedShape(
+ row_partitions, inner_shape, dtype=dtype, validate=validate)
def testConstructorStaticOK(self):
row_partitions = [
@@ -1025,7 +1033,7 @@
RowPartition.from_value_rowids([0, 1, 2], nrows=3)
]
inner_shape = [3]
- rts = RaggedShape(row_partitions, inner_shape, validate=True)
+ rts = DynamicRaggedShape(row_partitions, inner_shape, validate=True)
static_inner_shape = tensor_util.constant_value(rts.inner_shape)
static_valid_rowids0 = tensor_util.constant_value(
rts.row_partitions[0].value_rowids())
@@ -1036,30 +1044,30 @@
self.assertAllEqual(static_valid_rowids1, [0, 1, 2])
def testZeros(self):
- shape_x = RaggedShape.from_lengths([3, (1, 3, 2), 4])
+ shape_x = DynamicRaggedShape.from_lengths([3, (1, 3, 2), 4])
foo = ragged_array_ops.zeros(shape_x)
- self.assertShapeEq(shape_x, RaggedShape.from_tensor(foo))
+ self.assertShapeEq(shape_x, DynamicRaggedShape.from_tensor(foo))
self.assertAllEqual(array_ops.zeros([6, 4]), foo.flat_values)
def testOnes(self):
- shape_x = RaggedShape.from_lengths([3, (1, 3, 2), 4])
+ shape_x = DynamicRaggedShape.from_lengths([3, (1, 3, 2), 4])
foo = ragged_array_ops.ones(shape_x)
- self.assertShapeEq(shape_x, RaggedShape.from_tensor(foo))
+ self.assertShapeEq(shape_x, DynamicRaggedShape.from_tensor(foo))
self.assertAllEqual(array_ops.ones([6, 4]), foo.flat_values)
def testReshapeTensor(self):
foo = array_ops.zeros([3, 2, 4])
- shape_b = RaggedShape.from_lengths([3, (3, 2, 1), 4])
+ shape_b = DynamicRaggedShape.from_lengths([3, (3, 2, 1), 4])
result = ragged_array_ops.ragged_reshape(foo, shape_b)
- self.assertShapeEq(shape_b, RaggedShape.from_tensor(result))
+ self.assertShapeEq(shape_b, DynamicRaggedShape.from_tensor(result))
self.assertAllEqual(array_ops.zeros([6, 4]), result.flat_values)
def test_reshape_ragged_tensor(self):
- shape_x = RaggedShape.from_lengths([3, (1, 3, 2), 4])
+ shape_x = DynamicRaggedShape.from_lengths([3, (1, 3, 2), 4])
foo = ragged_array_ops.zeros(shape_x)
- shape_b = RaggedShape.from_lengths([3, (3, 2, 1), 4])
+ shape_b = DynamicRaggedShape.from_lengths([3, (3, 2, 1), 4])
result = ragged_array_ops.ragged_reshape(foo, shape_b)
- self.assertShapeEq(shape_b, RaggedShape.from_tensor(result))
+ self.assertShapeEq(shape_b, DynamicRaggedShape.from_tensor(result))
self.assertAllEqual(array_ops.zeros([6, 4]), result.flat_values)
@parameterized.parameters([
@@ -1173,20 +1181,20 @@
# Whether the shape generated is correct.
# Whether broadcasting is the same as broadcast_to.
# Instead of specifying values, it just uses primes.
- shape_a = RaggedShape.from_lengths(lengths_a)
+ shape_a = DynamicRaggedShape.from_lengths(lengths_a)
if num_row_partitions_a is not None:
shape_a = shape_a._with_num_row_partitions(num_row_partitions_a)
- shape_b = RaggedShape.from_lengths(lengths_b)
+ shape_b = DynamicRaggedShape.from_lengths(lengths_b)
if num_row_partitions_b is not None:
shape_b = shape_b._with_num_row_partitions(num_row_partitions_b)
- shape_e = RaggedShape.from_lengths(lengths_e)
+ shape_e = DynamicRaggedShape.from_lengths(lengths_e)
if num_row_partitions_e is not None:
shape_e = shape_e._with_num_row_partitions(num_row_partitions_e)
[actual, bc_a, bc_b
- ] = ragged_shape.broadcast_dynamic_shape_extended(shape_a, shape_b)
+ ] = dynamic_ragged_shape.broadcast_dynamic_shape_extended(shape_a, shape_b)
[actual_rev, bc_b_rev, bc_a_rev
- ] = ragged_shape.broadcast_dynamic_shape_extended(shape_b, shape_a)
+ ] = dynamic_ragged_shape.broadcast_dynamic_shape_extended(shape_b, shape_a)
self.assertShapeEq(actual, shape_e)
self.assertShapeEq(actual_rev, shape_e)
@@ -1194,13 +1202,13 @@
_lowest_primes(_num_elements_of_lengths(lengths_a)), shape_a)
bc_a_actual = bc_a.broadcast(rt_a)
bc_a_actual_rev = bc_a_rev.broadcast(rt_a)
- bc_a_expected = ragged_shape.broadcast_to(rt_a, shape_e)
+ bc_a_expected = dynamic_ragged_shape.broadcast_to(rt_a, shape_e)
self.assertAllEqual(bc_a_expected, bc_a_actual)
self.assertAllEqual(bc_a_expected, bc_a_actual_rev)
rt_b = ragged_array_ops.ragged_reshape(
_lowest_primes(_num_elements_of_lengths(lengths_b)), shape_b)
- bc_b_expected = ragged_shape.broadcast_to(rt_b, shape_e)
+ bc_b_expected = dynamic_ragged_shape.broadcast_to(rt_b, shape_e)
bc_b_actual = bc_b.broadcast(rt_b)
bc_b_actual_rev = bc_b_rev.broadcast(rt_b)
self.assertAllEqual(bc_b_expected, bc_b_actual)
@@ -1225,7 +1233,7 @@
])
def testWithDenseRank(self, lengths, dense_rank, lengths_e):
# Makes little sense with from_lengths/_with_num_row_partitions.
- original = RaggedShape.from_lengths(lengths)
+ original = DynamicRaggedShape.from_lengths(lengths)
actual = original.with_inner_rank(dense_rank)
self.assertAllEqual(actual.inner_rank, dense_rank)
self.assertAllEqual(actual.static_lengths(), lengths_e)
@@ -1244,14 +1252,14 @@
])
def testFromRowPartitions(self, rps, lengths_e, num_row_partitions_e):
rps = _to_row_partitions_from_lengths(rps)
- actual = RaggedShape.from_row_partitions(rps)
- expected = RaggedShape.from_lengths(lengths_e)._with_num_row_partitions(
- num_row_partitions_e)
+ actual = DynamicRaggedShape.from_row_partitions(rps)
+ expected = DynamicRaggedShape.from_lengths(
+ lengths_e)._with_num_row_partitions(num_row_partitions_e)
self.assertShapeEq(expected, actual)
def testFromRowPartitionsError(self):
with self.assertRaisesRegex(ValueError, 'row_partitions cannot be empty'):
- RaggedShape.from_row_partitions([])
+ DynamicRaggedShape.from_row_partitions([])
@parameterized.parameters([
#=========================================================================
@@ -1378,19 +1386,19 @@
def testBroadcastDimension(self, original_lengths, broadcast_lengths):
"""Tests broadcast_to on a single dimension."""
original_rt = _to_prime_tensor_from_lengths(original_lengths)
- bcast_shape = RaggedShape.from_lengths(broadcast_lengths)
- result_rt = ragged_shape.broadcast_to(original_rt, bcast_shape)
- result_shape = RaggedShape.from_tensor(result_rt)
+ bcast_shape = DynamicRaggedShape.from_lengths(broadcast_lengths)
+ result_rt = dynamic_ragged_shape.broadcast_to(original_rt, bcast_shape)
+ result_shape = DynamicRaggedShape.from_tensor(result_rt)
self.assertShapeEq(bcast_shape, result_shape)
def testAsRowPartitions(self):
- my_shape = RaggedShape.from_lengths([3, (2, 0, 1), 5])
+ my_shape = DynamicRaggedShape.from_lengths([3, (2, 0, 1), 5])
rps = my_shape._as_row_partitions()
self.assertLen(rps, 2)
def testAsRowPartitionsRaises(self):
- my_shape = RaggedShape.from_lengths([])
+ my_shape = DynamicRaggedShape.from_lengths([])
with self.assertRaisesRegex(ValueError,
'rank must be >= 1 for _as_row_partitions'):
my_shape._as_row_partitions()
@@ -1458,25 +1466,25 @@
expected_dims=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]),
])
def testBroadcastDynamicShape(self, x_dims, y_dims, expected_dims):
- shape_a = RaggedShape.from_lengths(x_dims)
- shape_b = RaggedShape.from_lengths(y_dims)
- shape_e = RaggedShape.from_lengths(expected_dims)
+ shape_a = DynamicRaggedShape.from_lengths(x_dims)
+ shape_b = DynamicRaggedShape.from_lengths(y_dims)
+ shape_e = DynamicRaggedShape.from_lengths(expected_dims)
[actual, bc_a, bc_b
- ] = ragged_shape.broadcast_dynamic_shape_extended(shape_a, shape_b)
+ ] = dynamic_ragged_shape.broadcast_dynamic_shape_extended(shape_a, shape_b)
[actual_rev, bc_b_rev, bc_a_rev
- ] = ragged_shape.broadcast_dynamic_shape_extended(shape_b, shape_a)
+ ] = dynamic_ragged_shape.broadcast_dynamic_shape_extended(shape_b, shape_a)
self.assertShapeEq(actual, shape_e)
self.assertShapeEq(actual_rev, shape_e)
rt_a = _to_prime_tensor_from_lengths(x_dims)
bc_a_actual = bc_a.broadcast(rt_a)
bc_a_actual_rev = bc_a_rev.broadcast(rt_a)
- bc_a_expected = ragged_shape.broadcast_to(rt_a, shape_e)
+ bc_a_expected = dynamic_ragged_shape.broadcast_to(rt_a, shape_e)
self.assertAllEqual(bc_a_expected, bc_a_actual)
self.assertAllEqual(bc_a_expected, bc_a_actual_rev)
rt_b = _to_prime_tensor_from_lengths(y_dims)
- bc_b_expected = ragged_shape.broadcast_to(rt_b, shape_e)
+ bc_b_expected = dynamic_ragged_shape.broadcast_to(rt_b, shape_e)
bc_b_actual = bc_b.broadcast(rt_b)
bc_b_actual_rev = bc_b_rev.broadcast(rt_b)
self.assertAllEqual(bc_b_expected, bc_b_actual)
@@ -1484,7 +1492,7 @@
# This just wraps broadcast_dynamic_shape_extended, so nothing
# deeper is required.
- result1 = ragged_shape.broadcast_dynamic_shape(shape_a, shape_b)
+ result1 = dynamic_ragged_shape.broadcast_dynamic_shape(shape_a, shape_b)
self.assertShapeEq(shape_e, result1)
# Again, just a wrapper.
@@ -1495,7 +1503,7 @@
a_0 = constant_op.constant(1, dtypes.int64)
b_0 = constant_op.constant(3, dtypes.int64)
[a_layer, b_layer
- ] = ragged_shape._broadcast_dynamic_shape_first_layer(a_0, b_0)
+ ] = dynamic_ragged_shape._broadcast_dynamic_shape_first_layer(a_0, b_0)
expected_a_layer = _LayerBroadcaster.from_gather_index([0, 0, 0])
expected_b_layer = _LayerBroadcaster.from_gather_index([0, 1, 2])
self.assertLayerBroadcasterEq(expected_a_layer, a_layer)
@@ -1509,23 +1517,23 @@
constant_op.constant([0, 0, 0], dtype=dtypes.int64))
bc_0 = _LayerBroadcaster.from_gather_index(
constant_op.constant([0, 1, 2], dtype=dtypes.int64))
- ragged_shape._broadcast_dynamic_shape_next_layer_half_ragged(
+ dynamic_ragged_shape._broadcast_dynamic_shape_next_layer_half_ragged(
ac_0, bc_0, a_1, b_1)
def testBroadcastDynamicShapeRaisesLeft(self):
- shape = RaggedShape.from_tensor(constant_op.constant([1, 2, 3]))
+ shape = DynamicRaggedShape.from_tensor(constant_op.constant([1, 2, 3]))
with self.assertRaisesRegex(TypeError, 'shape_x must be'):
- ragged_shape.broadcast_dynamic_shape(1, shape)
+ dynamic_ragged_shape.broadcast_dynamic_shape(1, shape)
def testBroadcastDynamicShapeRaisesRight(self):
- shape = RaggedShape.from_tensor(constant_op.constant([1, 2, 3]))
+ shape = DynamicRaggedShape.from_tensor(constant_op.constant([1, 2, 3]))
with self.assertRaisesRegex(TypeError, 'shape_y must be'):
- ragged_shape.broadcast_dynamic_shape(shape, 1)
+ dynamic_ragged_shape.broadcast_dynamic_shape(shape, 1)
def testBroadcastToRaises(self):
rt = constant_op.constant([1, 2, 3])
with self.assertRaisesRegex(TypeError, 'shape must be'):
- ragged_shape.broadcast_to(rt, 1)
+ dynamic_ragged_shape.broadcast_to(rt, 1)
@parameterized.parameters([
dict(
@@ -1563,14 +1571,14 @@
[[[1, 1], [2, 2]], [[3, 3]]], ragged_rank=1)),
])
def testRaggedBroadcastTo(self, x, lengths, expected):
- shape = RaggedShape.from_lengths(lengths)
- result = ragged_shape.broadcast_to(x, shape)
+ shape = DynamicRaggedShape.from_lengths(lengths)
+ result = dynamic_ragged_shape.broadcast_to(x, shape)
self.assertEqual(
getattr(result, 'num_row_partitions', 0),
getattr(expected, 'num_row_partitions', 0))
self.assertAllEqual(result, expected)
- # broadcast_to just calls ragged_shape.broadcast_to, so
+ # broadcast_to just calls dynamic_ragged_shape.broadcast_to, so
# this should be sufficient.
result2 = ragged_array_ops.broadcast_to(x, shape)
self.assertAllEqual(result2, expected)
@@ -1643,7 +1651,7 @@
def testAddSelf(self, lengths_a, new_impl, op_max, num_row_partitions_a=None):
if context.executing_eagerly():
return
- shape_a0 = RaggedShape.from_lengths(
+ shape_a0 = DynamicRaggedShape.from_lengths(
lengths_a, num_row_partitions=num_row_partitions_a)
rt_a = ragged_array_ops.ragged_reshape(
_lowest_primes(_num_elements_of_lengths(lengths_a)), shape_a0)
@@ -1651,8 +1659,8 @@
g = rt_a.flat_values.graph if ragged_tensor.is_ragged(rt_a) else rt_a.graph
nodes_at_a = len(g.as_graph_def().node)
if new_impl:
- ragged_shape.ragged_binary_elementwise_op_impl(gen_math_ops.add_v2, rt_a,
- rt_b)
+ dynamic_ragged_shape.ragged_binary_elementwise_op_impl(
+ gen_math_ops.add_v2, rt_a, rt_b)
nodes_at_b = len(g.as_graph_def().node)
node_delta = nodes_at_b - nodes_at_a
self.assertLessEqual(node_delta, op_max)
@@ -1671,7 +1679,7 @@
return
values = constant_op.constant([True, False, True, True, True])
rt_a = RaggedTensor.from_row_splits(values, [0, 3, 3, 5])
- result = ragged_shape.ragged_binary_elementwise_op_impl(
+ result = dynamic_ragged_shape.ragged_binary_elementwise_op_impl(
gen_math_ops.logical_and, rt_a, rt_a)
expected_values = values
@@ -1687,7 +1695,7 @@
b = constant_op.constant(3)
rt_expected = ragged_factory_ops.constant([[True, False, True], [True]])
- result = ragged_shape.ragged_binary_elementwise_op_impl(
+ result = dynamic_ragged_shape.ragged_binary_elementwise_op_impl(
math_ops.equal, rt_a, b)
self.assertAllEqual(result, rt_expected)
@@ -1730,9 +1738,9 @@
if context.executing_eagerly():
return
- shape_a0 = RaggedShape.from_lengths(
+ shape_a0 = DynamicRaggedShape.from_lengths(
lengths_a, num_row_partitions=num_row_partitions_a)
- shape_b0 = RaggedShape.from_lengths(
+ shape_b0 = DynamicRaggedShape.from_lengths(
lengths_b, num_row_partitions=num_row_partitions_b)
rt_a = ragged_array_ops.ragged_reshape(
_lowest_primes(_num_elements_of_lengths(lengths_a)), shape_a0)
@@ -1742,8 +1750,10 @@
nodes_at_a = len(g.as_graph_def().node)
if new_impl:
- ragged_shape.ragged_binary_elementwise_op_impl(gen_math_ops.add_v2, rt_a,
- rt_b)
+ dynamic_ragged_shape.ragged_binary_elementwise_op_impl(
+ gen_math_ops.add_v2,
+ rt_a,
+ rt_b)
nodes_at_b = len(g.as_graph_def().node)
num_nodes = nodes_at_b - nodes_at_a
self.assertLessEqual(num_nodes, max_num_ops)
@@ -1798,16 +1808,16 @@
num_row_partitions_b=None):
if context.executing_eagerly():
return
- shape_a = RaggedShape.from_lengths(
+ shape_a = DynamicRaggedShape.from_lengths(
lengths_a, num_row_partitions=num_row_partitions_a)
- shape_b = RaggedShape.from_lengths(
+ shape_b = DynamicRaggedShape.from_lengths(
lengths_b, num_row_partitions=num_row_partitions_b)
rt_a = ragged_array_ops.ragged_reshape(
_lowest_primes(_num_elements_of_lengths(lengths_a)), shape_a)
rt_b = ragged_array_ops.ragged_reshape(
_lowest_primes(_num_elements_of_lengths(lengths_b)), shape_b)
if new_impl:
- result = ragged_shape.ragged_binary_elementwise_op_impl(
+ result = dynamic_ragged_shape.ragged_binary_elementwise_op_impl(
math_ops.add, rt_a, rt_b)
shape_e = tensor_shape.TensorShape(shape_e)
self.assertEqual(shape_e.as_list(), result.shape.as_list())
@@ -1849,12 +1859,12 @@
num_row_partitions_b=None):
if context.executing_eagerly():
return
- shape_a = RaggedShape.from_lengths(
+ shape_a = DynamicRaggedShape.from_lengths(
lengths_a, num_row_partitions=num_row_partitions_a)
- shape_b = RaggedShape.from_lengths(
+ shape_b = DynamicRaggedShape.from_lengths(
lengths_b, num_row_partitions=num_row_partitions_b)
- result = ragged_shape.broadcast_dynamic_shape(shape_a, shape_b)
+ result = dynamic_ragged_shape.broadcast_dynamic_shape(shape_a, shape_b)
result_shape = result._to_tensor_shape()
tensor_shape_e = [None if isinstance(x, tuple) else x for x in shape_e]
@@ -1867,9 +1877,9 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int64)])
def fun(x):
- shape_a = RaggedShape([], array_ops.stack([5, x, 3]))
- shape_b = RaggedShape.from_lengths([1, 3], dtype=dtypes.int64)
- result = ragged_shape.broadcast_dynamic_shape(shape_a, shape_b)
+ shape_a = DynamicRaggedShape([], array_ops.stack([5, x, 3]))
+ shape_b = DynamicRaggedShape.from_lengths([1, 3], dtype=dtypes.int64)
+ result = dynamic_ragged_shape.broadcast_dynamic_shape(shape_a, shape_b)
self.assertAllEqual([5, None, 3], result.static_lengths())
fun(constant_op.constant(2, dtype=dtypes.int64))
@@ -1879,9 +1889,9 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int64)])
def fun(x):
- shape_a = RaggedShape([], array_ops.stack([5, x, 3]))
- shape_b = RaggedShape.from_lengths([2, 3], dtype=dtypes.int64)
- result = ragged_shape.broadcast_dynamic_shape(shape_a, shape_b)
+ shape_a = DynamicRaggedShape([], array_ops.stack([5, x, 3]))
+ shape_b = DynamicRaggedShape.from_lengths([2, 3], dtype=dtypes.int64)
+ result = dynamic_ragged_shape.broadcast_dynamic_shape(shape_a, shape_b)
self.assertAllEqual([5, 2, 3], result.static_lengths())
fun(constant_op.constant(2, dtype=dtypes.int64))
@@ -1889,7 +1899,7 @@
if context.executing_eagerly():
return
rp = RowPartition.from_row_lengths([4, 2, 3])
- result = RaggedShape.from_row_partitions([rp])
+ result = DynamicRaggedShape.from_row_partitions([rp])
self.assertEqual([3, (4, 2, 3)], result.static_lengths())
@parameterized.parameters([
@@ -1916,7 +1926,7 @@
def testDimStatic(self, lengths_a, dim, expected):
if context.executing_eagerly():
return
- shape_a = RaggedShape.from_lengths(lengths_a)
+ shape_a = DynamicRaggedShape.from_lengths(lengths_a)
result = tensor_util.constant_value(shape_a[dim])
self.assertEqual(result, expected)
@@ -1937,7 +1947,7 @@
num_row_partitions_a=None):
if context.executing_eagerly():
return
- shape_a = RaggedShape.from_lengths(
+ shape_a = DynamicRaggedShape.from_lengths(
lengths_a, num_row_partitions=num_row_partitions_a)
result = shape_a._with_num_row_partitions(new_num_row_partitions)
self.assertEqual(shape_e, result.static_lengths())
@@ -1949,7 +1959,7 @@
def testFromLengthsNRowsStatic(self, lengths_a):
if context.executing_eagerly():
return
- shape_a = RaggedShape.from_lengths(lengths_a)
+ shape_a = DynamicRaggedShape.from_lengths(lengths_a)
for rp in shape_a.row_partitions:
actual = tensor_util.constant_value(rp.nrows())
self.assertIsNotNone(actual, 'Failed on ' + str(rp))
@@ -1974,7 +1984,7 @@
num_row_partitions_a=None):
if context.executing_eagerly():
return
- shape_a = RaggedShape.from_lengths(
+ shape_a = DynamicRaggedShape.from_lengths(
lengths_a, num_row_partitions=num_row_partitions_a)
result = shape_a._alt_inner_shape(new_inner_rank)
result_static = tensor_util.constant_value_as_shape(result)
@@ -2008,7 +2018,7 @@
# In particular, any uniform_row_length should be reproduced.
if context.executing_eagerly():
return
- shape = RaggedShape.from_lengths(
+ shape = DynamicRaggedShape.from_lengths(
lengths, num_row_partitions=num_row_partitions)
rt_a = ragged_array_ops.ragged_reshape(
_lowest_primes(_num_elements_of_lengths(lengths)), shape)
@@ -2030,7 +2040,7 @@
# Note that this test loses the later static values.
if context.executing_eagerly():
return
- shape = RaggedShape.from_lengths(
+ shape = DynamicRaggedShape.from_lengths(
lengths, num_row_partitions=num_row_partitions)
shape_b = shape._with_num_row_partitions(shape.rank - 1)
self.assertEqual(shape_e, shape_b.static_lengths())
@@ -2039,14 +2049,15 @@
# Note that this test loses the later static values.
if context.executing_eagerly():
return
- shape = RaggedShape.from_lengths(
+ shape = DynamicRaggedShape.from_lengths(
[5, 2, 3], num_row_partitions=2)
shape_b = shape._with_num_row_partitions(0)
self.assertEqual([5, 2, 3], shape_b.static_lengths())
def testWithNumRowPartitionsDType(self):
# Note that this test loses the later static values.
- shape = RaggedShape([], constant_op.constant([5, 2, 3], dtype=dtypes.int32))
+ shape = DynamicRaggedShape([], constant_op.constant([5, 2, 3],
+ dtype=dtypes.int32))
self.assertEqual(shape.dtype, dtypes.int32)
result = shape._with_num_row_partitions(2)
@@ -2106,8 +2117,8 @@
expected_rrank = getattr(expected, 'num_row_partitions', 0)
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, dtype=dtypes.int32)
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, dtype=dtypes.int32)
- result = ragged_shape.ragged_binary_elementwise_op_impl(gen_math_ops.add_v2,
- x, y)
+ result = dynamic_ragged_shape.ragged_binary_elementwise_op_impl(
+ gen_math_ops.add_v2, x, y)
result_rrank = getattr(result, 'num_row_partitions', 0)
self.assertEqual(expected_rrank, result_rrank)
if hasattr(expected, 'tolist'):
@@ -2115,7 +2126,7 @@
self.assertAllEqual(result, expected)
def testDimensions(self):
- a = RaggedShape._from_inner_shape([1, 2, 3])
+ a = DynamicRaggedShape._from_inner_shape([1, 2, 3])
self.assertAllEqual(1, a._dimension(0))
@parameterized.parameters([
@@ -2132,7 +2143,7 @@
num_row_partitions,
expected,
expected_eager=None):
- a = RaggedShape.from_lengths(lengths)._with_num_row_partitions(
+ a = DynamicRaggedShape.from_lengths(lengths)._with_num_row_partitions(
num_row_partitions)
actual = a.static_lengths()
if context.executing_eagerly() and expected_eager is not None:
@@ -2145,7 +2156,7 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(row_lengths):
- a = RaggedShape([RowPartition.from_row_lengths(row_lengths)], [6])
+ a = DynamicRaggedShape([RowPartition.from_row_lengths(row_lengths)], [6])
actual = a.static_lengths()
self.assertAllEqual([None, None], actual)
@@ -2157,17 +2168,19 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(inner_shape):
- a = RaggedShape([RowPartition.from_row_lengths([3, 3])], inner_shape)
+ a = DynamicRaggedShape([RowPartition.from_row_lengths([3, 3])],
+ inner_shape)
actual = a.static_lengths()
self.assertAllEqual([2, (3, 3), ...], actual)
foo([6, 3])
def testReprRankKnown(self):
- a = RaggedShape.from_lengths([2, (1, 2), 3])
+ a = DynamicRaggedShape.from_lengths([2, (1, 2), 3])
actual = str(a)
self.assertEqual(
- '<RaggedShape lengths=[2, (1, 2), 3] num_row_partitions=1>', actual)
+ '<DynamicRaggedShape lengths=[2, (1, 2), 3] num_row_partitions=1>',
+ actual)
def assertDimsEqual(self, x: tensor_shape.TensorShape,
y: tensor_shape.TensorShape):
@@ -2182,7 +2195,7 @@
self.assertAllEqual(x.as_list(), y.as_list())
def testToTensorShapeRankKnown(self):
- a = RaggedShape.from_lengths([2, (1, 2), 3])
+ a = DynamicRaggedShape.from_lengths([2, (1, 2), 3])
actual = a._to_tensor_shape()
self.assertDimsEqual(tensor_shape.TensorShape([2, None, 3]), actual)
@@ -2191,10 +2204,12 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(inner_shape):
- a = RaggedShape([RowPartition.from_row_lengths([3, 3])], inner_shape)
+ a = DynamicRaggedShape([RowPartition.from_row_lengths([3, 3])],
+ inner_shape)
actual = str(a)
self.assertEqual(
- '<RaggedShape lengths=[2, (3, 3), ...] num_row_partitions=1>', actual)
+ '<DynamicRaggedShape lengths=[2, (3, 3), ...] num_row_partitions=1>',
+ actual)
foo([6, 3])
@@ -2202,7 +2217,8 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(inner_shape):
- a = RaggedShape([RowPartition.from_row_lengths([3, 3])], inner_shape)
+ a = DynamicRaggedShape([RowPartition.from_row_lengths([3, 3])],
+ inner_shape)
actual = a._to_tensor_shape()
self.assertDimsEqual(
tensor_shape.TensorShape(None), actual)
@@ -2210,10 +2226,10 @@
foo([6, 3])
def testBroadcastDynamicShapeExtendedRankOne(self):
- a = RaggedShape._from_inner_shape([1])
- b = RaggedShape._from_inner_shape([3])
- (c, ac, bc) = ragged_shape.broadcast_dynamic_shape_extended(a, b)
- expected_c = RaggedShape._from_inner_shape([3])
+ a = DynamicRaggedShape._from_inner_shape([1])
+ b = DynamicRaggedShape._from_inner_shape([3])
+ (c, ac, bc) = dynamic_ragged_shape.broadcast_dynamic_shape_extended(a, b)
+ expected_c = DynamicRaggedShape._from_inner_shape([3])
self.assertShapeEq(c, expected_c)
ac_result = ac.broadcast(constant_op.constant([4]))
self.assertAllEqual(ac_result, [4, 4, 4])
@@ -2221,10 +2237,10 @@
self.assertAllEqual(bc_result, [4, 7, 1])
def testBroadcastDynamicShapeExtendedRankOneRev(self):
- a = RaggedShape._from_inner_shape([3])
- b = RaggedShape._from_inner_shape([1])
- (c, ac, bc) = ragged_shape.broadcast_dynamic_shape_extended(a, b)
- expected_c = RaggedShape._from_inner_shape([3])
+ a = DynamicRaggedShape._from_inner_shape([3])
+ b = DynamicRaggedShape._from_inner_shape([1])
+ (c, ac, bc) = dynamic_ragged_shape.broadcast_dynamic_shape_extended(a, b)
+ expected_c = DynamicRaggedShape._from_inner_shape([3])
self.assertShapeEq(c, expected_c)
bc_result = bc.broadcast(constant_op.constant([4]))
self.assertAllEqual(bc_result, [4, 4, 4])
@@ -2232,10 +2248,10 @@
self.assertAllEqual(ac_result, [4, 7, 1])
def testBroadcastDynamicShapeExtendedRankOneIdentity(self):
- a = RaggedShape._from_inner_shape([3])
- b = RaggedShape._from_inner_shape([3])
- (c, ac, bc) = ragged_shape.broadcast_dynamic_shape_extended(a, b)
- expected_c = RaggedShape._from_inner_shape([3])
+ a = DynamicRaggedShape._from_inner_shape([3])
+ b = DynamicRaggedShape._from_inner_shape([3])
+ (c, ac, bc) = dynamic_ragged_shape.broadcast_dynamic_shape_extended(a, b)
+ expected_c = DynamicRaggedShape._from_inner_shape([3])
self.assertShapeEq(c, expected_c)
bc_result = bc.broadcast(constant_op.constant([4, 7, 1]))
self.assertAllEqual(bc_result, [4, 7, 1])
@@ -2250,41 +2266,41 @@
### Tests mostly for code coverage ###########################################
def testFindPreferredDtypeIntNone(self):
- actual = ragged_shape._find_dtype(3, None)
+ actual = dynamic_ragged_shape._find_dtype(3, None)
self.assertIsNone(actual)
@parameterized.parameters([
dict(
- source_shape=lambda: RaggedShape._from_inner_shape([3]),
- target_shape=lambda: RaggedShape._from_inner_shape([3]),
+ source_shape=lambda: DynamicRaggedShape._from_inner_shape([3]),
+ target_shape=lambda: DynamicRaggedShape._from_inner_shape([3]),
layer_broadcasters=lambda: [int],
dtype=None,
error_type=TypeError,
error_regex=r'Not a LayerBroadcaster'),
dict(
- source_shape=lambda: RaggedShape._from_inner_shape([3]),
- target_shape=lambda: RaggedShape._from_inner_shape([3]),
+ source_shape=lambda: DynamicRaggedShape._from_inner_shape([3]),
+ target_shape=lambda: DynamicRaggedShape._from_inner_shape([3]),
layer_broadcasters=lambda: _LayerBroadcaster.from_gather_index(
[0, 1, 2]),
dtype=None,
error_type=TypeError,
error_regex=r'layer'),
dict(
- source_shape=lambda: RaggedShape._from_inner_shape([3]),
+ source_shape=lambda: DynamicRaggedShape._from_inner_shape([3]),
target_shape=lambda: None,
layer_broadcasters=lambda:
[_LayerBroadcaster.from_gather_index([0, 1, 2])],
dtype=None,
error_type=TypeError,
- error_regex='target_shape is not a RaggedShape'),
+ error_regex='target_shape is not a DynamicRaggedShape'),
dict(
source_shape=lambda: None,
- target_shape=lambda: RaggedShape._from_inner_shape([3]),
+ target_shape=lambda: DynamicRaggedShape._from_inner_shape([3]),
layer_broadcasters=lambda:
[_LayerBroadcaster.from_gather_index([0, 1, 2])],
dtype=None,
error_type=TypeError,
- error_regex='source_shape is not a RaggedShape')
+ error_regex='source_shape is not a DynamicRaggedShape')
])
def testBroadcasterInitRaises(self, source_shape, target_shape,
layer_broadcasters, dtype, error_type,
@@ -2293,37 +2309,37 @@
target_shape = target_shape()
layer_broadcasters = layer_broadcasters()
with self.assertRaisesRegex(error_type, error_regex):
- ragged_shape._Broadcaster(
+ dynamic_ragged_shape._Broadcaster(
source_shape, target_shape, layer_broadcasters, dtype=dtype)
def testBroadcasterRepr(self):
- source_shape = RaggedShape(
+ source_shape = DynamicRaggedShape(
[RowPartition.from_row_splits(constant_op.constant([0, 1, 2]))],
constant_op.constant([3]))
- target_shape = RaggedShape(
+ target_shape = DynamicRaggedShape(
[RowPartition.from_row_splits(constant_op.constant([0, 1, 2]))],
constant_op.constant([3]))
layer_broadcasters = [
_LayerBroadcaster.from_gather_index(constant_op.constant([0, 1, 2])),
_LayerBroadcaster.from_gather_index(constant_op.constant([0, 1, 2]))
]
- bc = ragged_shape._Broadcaster(source_shape, target_shape,
- layer_broadcasters)
+ bc = dynamic_ragged_shape._Broadcaster(source_shape, target_shape,
+ layer_broadcasters)
actual = str(bc)
- self.assertRegex(actual, '.src_shape..RaggedShape')
+ self.assertRegex(actual, '.src_shape..DynamicRaggedShape')
def testBroadcasterWithDtype(self):
- source_shape = RaggedShape(
+ source_shape = DynamicRaggedShape(
[RowPartition.from_row_splits(constant_op.constant([0, 1, 2]))],
constant_op.constant([3]))
- target_shape = RaggedShape(
+ target_shape = DynamicRaggedShape(
[RowPartition.from_row_splits(constant_op.constant([0, 1, 2]))],
constant_op.constant([3]))
layer_broadcasters = [
_LayerBroadcaster.from_gather_index(constant_op.constant([0, 1, 2])),
_LayerBroadcaster.from_gather_index(constant_op.constant([0, 1, 2]))
]
- bc = ragged_shape._Broadcaster(
+ bc = dynamic_ragged_shape._Broadcaster(
source_shape, target_shape, layer_broadcasters, dtype=dtypes.int32)
bc2 = bc.with_dtype(dtypes.int64)
@@ -2351,40 +2367,40 @@
dtype_right=dtypes.int64)])
def testBroadcastWithDifferentDenseShapeDTypes(self, dtype_left,
dtype_right):
- s_left = RaggedShape._from_inner_shape(constant_op.constant([4, 1],
- dtype_left))
- s_right = RaggedShape._from_inner_shape(constant_op.constant([1, 4],
- dtype_right))
- s_result = ragged_shape.broadcast_dynamic_shape(s_left, s_right)
+ s_left = DynamicRaggedShape._from_inner_shape(
+ constant_op.constant([4, 1], dtype_left))
+ s_right = DynamicRaggedShape._from_inner_shape(
+ constant_op.constant([1, 4], dtype_right))
+ s_result = dynamic_ragged_shape.broadcast_dynamic_shape(s_left, s_right)
self.assertEqual(s_result.dtype, dtypes.int64)
def testBroadcastFlatValuesToDenseExpand(self):
source = RaggedTensor.from_uniform_row_length([0, 1, 2, 3], 2)
- target_shape = RaggedShape._from_inner_shape([1, 2, 2])
- broadcaster = ragged_shape._get_broadcaster(
- RaggedShape.from_tensor(source), target_shape)
+ target_shape = DynamicRaggedShape._from_inner_shape([1, 2, 2])
+ broadcaster = dynamic_ragged_shape._get_broadcaster(
+ DynamicRaggedShape.from_tensor(source), target_shape)
flat_values = broadcaster.broadcast_flat_values(source)
self.assertAllEqual(flat_values, [[[0, 1], [2, 3]]])
# TODO(edloper): Confirm that this is the expected behavior.
def testBroadcastFlatValuesToDenseExpandInnerDimensionsFalse(self):
source = RaggedTensor.from_uniform_row_length([0, 1, 2, 3], 2)
- target_shape = RaggedShape._from_inner_shape([1, 2, 2])
- broadcaster = ragged_shape._get_broadcaster(
- RaggedShape.from_tensor(source), target_shape)
+ target_shape = DynamicRaggedShape._from_inner_shape([1, 2, 2])
+ broadcaster = dynamic_ragged_shape._get_broadcaster(
+ DynamicRaggedShape.from_tensor(source), target_shape)
flat_values = broadcaster.broadcast_flat_values(
source, inner_dimensions=False)
self.assertAllEqual(flat_values, [[0, 1], [2, 3]])
def testGetLayerBroadcastersFromRPSRaisesTypeError(self):
with self.assertRaisesRegex(TypeError, 'Not a _LayerBroadcaster'):
- ragged_shape._get_layer_broadcasters_from_rps(int, [], [])
+ dynamic_ragged_shape._get_layer_broadcasters_from_rps(int, [], [])
def testGetBroadcasterRankDrop(self):
with self.assertRaisesRegex(ValueError, 'Cannot broadcast'):
- a = RaggedShape._from_inner_shape([3, 4, 5])
- b = RaggedShape._from_inner_shape([4, 5])
- ragged_shape._get_broadcaster(a, b)
+ a = DynamicRaggedShape._from_inner_shape([3, 4, 5])
+ b = DynamicRaggedShape._from_inner_shape([4, 5])
+ dynamic_ragged_shape._get_broadcaster(a, b)
@parameterized.parameters([
dict(
@@ -2423,7 +2439,7 @@
a_1 = a_1()
b_1 = b_1()
with self.assertRaisesRegex(error_type, error_regex):
- ragged_shape._broadcast_dynamic_shape_next_layer_half_ragged(
+ dynamic_ragged_shape._broadcast_dynamic_shape_next_layer_half_ragged(
ac_0, bc_0, a_1, b_1)
@parameterized.parameters([
@@ -2463,7 +2479,7 @@
a_1 = a_1()
b_1 = b_1()
with self.assertRaisesRegex(error_type, error_regex):
- ragged_shape._broadcast_dynamic_shape_next_layer_both_uniform(
+ dynamic_ragged_shape._broadcast_dynamic_shape_next_layer_both_uniform(
ac_0, bc_0, a_1, b_1)
@parameterized.parameters([
@@ -2503,7 +2519,7 @@
a_1 = a_1()
b_1 = b_1()
with self.assertRaisesRegex(error_type, error_regex):
- ragged_shape._broadcast_dynamic_shape_next_layer(
+ dynamic_ragged_shape._broadcast_dynamic_shape_next_layer(
ac_0, bc_0, a_1, b_1)
@parameterized.parameters([
@@ -2541,12 +2557,12 @@
(errors_impl.InvalidArgumentError, ValueError),
'Last row partition does not match flat_values.'):
rt = ragged_factory_ops.constant([[3], [4, 5], [6]])
- rt_shape = RaggedShape.from_tensor(rt)
+ rt_shape = DynamicRaggedShape.from_tensor(rt)
new_flat_values = constant_op.constant(['a', 'b', 'c', 'd', 'e'])
rt_shape._add_row_partitions(new_flat_values, validate=True)
-class RaggedShapeErrorTest(parameterized.TestCase):
+class DynamicRaggedShapeErrorTest(parameterized.TestCase):
@parameterized.parameters([
# Broadcast [1, 2, (1, 2)] to [1, 2, (2, 1)] (FAIL)
@@ -2583,9 +2599,9 @@
sess = session.Session()
with sess.as_default():
origin = _to_ragged_tensor_from_lengths(origin_values, origin_lengths)
- expected_shape = RaggedShape.from_lengths(expected_lengths)
+ expected_shape = DynamicRaggedShape.from_lengths(expected_lengths)
- rt = ragged_shape.broadcast_to(origin, expected_shape)
+ rt = dynamic_ragged_shape.broadcast_to(origin, expected_shape)
sess.run([rt])
@parameterized.parameters([
@@ -2597,7 +2613,7 @@
],
inner_shape=lambda: [4],
validate=True,
- error_regex='RowPartitions in RaggedShape do not'),
+ error_regex='RowPartitions in DynamicRaggedShape do not'),
# nvals and inner_shape[0] don't match (3 != 4) dynamically
dict(
row_partitions=lambda: [ # pylint: disable=g-long-lambda
@@ -2619,7 +2635,7 @@
with sess.as_default():
row_partitions = row_partitions()
inner_shape = inner_shape()
- rts = RaggedShape(
+ rts = DynamicRaggedShape(
row_partitions, inner_shape, dtype=dtype, validate=validate)
sess.run([rts.inner_shape])
@@ -2628,7 +2644,7 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(x):
- rts = RaggedShape._from_inner_shape(x)
+ rts = DynamicRaggedShape._from_inner_shape(x)
self.assertIsNone(rts.rank)
foo([3, 7, 5])
@@ -2639,7 +2655,7 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(x):
- rts = RaggedShape._from_inner_shape(x)
+ rts = DynamicRaggedShape._from_inner_shape(x)
rts._num_slices_in_dimension(-1)
foo([3, 7, 5])
@@ -2650,7 +2666,7 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(x):
- rts = RaggedShape._from_inner_shape(x)
+ rts = DynamicRaggedShape._from_inner_shape(x)
rts[-1] # pylint: disable=pointless-statement
foo([3, 7, 5])
@@ -2661,7 +2677,7 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(x):
- rts = RaggedShape._from_inner_shape(x)
+ rts = DynamicRaggedShape._from_inner_shape(x)
rts.with_inner_rank(1)
foo([3, 7, 5])
@@ -2672,7 +2688,7 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(x):
- rts = RaggedShape._from_inner_shape(x)
+ rts = DynamicRaggedShape._from_inner_shape(x)
rts._with_num_row_partitions(1)
foo([3, 7, 5])
@@ -2684,7 +2700,7 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(x):
- rts = RaggedShape._from_inner_shape(x)
+ rts = DynamicRaggedShape._from_inner_shape(x)
rts._as_row_partitions()
foo([3, 7, 5])
@@ -2696,23 +2712,27 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(x):
- a = RaggedShape._from_inner_shape(x)
- b = RaggedShape._from_inner_shape([1, 1, 1])
- ragged_shape.broadcast_dynamic_shape_extended(a, b)
+ a = DynamicRaggedShape._from_inner_shape(x)
+ b = DynamicRaggedShape._from_inner_shape([1, 1, 1])
+ dynamic_ragged_shape.broadcast_dynamic_shape_extended(a, b)
foo([3, 7, 5])
def testBroadcastDynamicShapeUnmatchedTypes6432(self):
- shape_int64 = RaggedShape.from_lengths([3, (0, 2, 3)], dtype=dtypes.int64)
- shape_int32 = RaggedShape.from_lengths([3, (0, 2, 3)], dtype=dtypes.int32)
+ shape_int64 = DynamicRaggedShape.from_lengths([3, (0, 2, 3)],
+ dtype=dtypes.int64)
+ shape_int32 = DynamicRaggedShape.from_lengths([3, (0, 2, 3)],
+ dtype=dtypes.int32)
with self.assertRaisesRegex(ValueError, "Dtypes don't match"):
- ragged_shape.broadcast_dynamic_shape(shape_int64, shape_int32)
+ dynamic_ragged_shape.broadcast_dynamic_shape(shape_int64, shape_int32)
def testBroadcastDynamicShapeUnmatchedTypes3264(self):
- shape_int64 = RaggedShape.from_lengths([3, (0, 2, 3)], dtype=dtypes.int64)
- shape_int32 = RaggedShape.from_lengths([3, (0, 2, 3)], dtype=dtypes.int32)
+ shape_int64 = DynamicRaggedShape.from_lengths([3, (0, 2, 3)],
+ dtype=dtypes.int64)
+ shape_int32 = DynamicRaggedShape.from_lengths([3, (0, 2, 3)],
+ dtype=dtypes.int32)
with self.assertRaisesRegex(ValueError, "Dtypes don't match"):
- ragged_shape.broadcast_dynamic_shape(shape_int32, shape_int64)
+ dynamic_ragged_shape.broadcast_dynamic_shape(shape_int32, shape_int64)
def testGetIdentityBroadcasterRankNone(self):
with self.assertRaisesRegex(ValueError, 'Shape must have a'):
@@ -2720,8 +2740,8 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(x):
- rts = RaggedShape._from_inner_shape(x)
- ragged_shape._get_identity_broadcaster(rts)
+ rts = DynamicRaggedShape._from_inner_shape(x)
+ dynamic_ragged_shape._get_identity_broadcaster(rts)
foo([3, 7, 5])
@@ -2737,16 +2757,16 @@
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(x):
- rts_a = RaggedShape._from_inner_shape(x)
- rts_b = RaggedShape._from_inner_shape(x)
- ragged_shape._get_broadcaster(rts_a, rts_b)
+ rts_a = DynamicRaggedShape._from_inner_shape(x)
+ rts_b = DynamicRaggedShape._from_inner_shape(x)
+ dynamic_ragged_shape._get_broadcaster(rts_a, rts_b)
foo([3, 7, 5])
def testFromTensorDType(self):
x = ragged_factory_ops.constant([[1, 2]])
self.assertEqual(x.row_splits.dtype, dtypes.int64)
- shape_x = RaggedShape.from_tensor(x)
+ shape_x = DynamicRaggedShape.from_tensor(x)
self.assertEqual(shape_x.dtype, dtypes.int64)
def testAddingRowSplits(self):
@@ -2774,7 +2794,7 @@
sess = session.Session()
with sess.as_default():
rt = ragged_factory_ops.constant([[3], [4, 5], [6]])
- rt_shape = RaggedShape.from_tensor(rt)
+ rt_shape = DynamicRaggedShape.from_tensor(rt)
new_flat_values = constant_op.constant(['a', 'b', 'c'])
rt2 = rt_shape._add_row_partitions(new_flat_values, validate=True)
sess.run([rt2])
diff --git a/tensorflow/python/ops/ragged/ragged_array_ops.py b/tensorflow/python/ops/ragged/ragged_array_ops.py
index 6e6d582..b6adcfe 100644
--- a/tensorflow/python/ops/ragged/ragged_array_ops.py
+++ b/tensorflow/python/ops/ragged/ragged_array_ops.py
@@ -26,9 +26,9 @@
from tensorflow.python.ops import gen_ragged_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sort_ops
+from tensorflow.python.ops.ragged import dynamic_ragged_shape
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_math_ops
-from tensorflow.python.ops.ragged import ragged_shape
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import segment_id_ops
@@ -978,7 +978,7 @@
num_or_size_splits,
message=('`num_or_size_splits` must be an `int` or 1-D list or '
'`Tensor` of integers.'))
- value_shape = ragged_shape.RaggedShape.from_tensor(value)
+ value_shape = dynamic_ragged_shape.DynamicRaggedShape.from_tensor(value)
axis = array_ops.get_positive_axis(axis, value_shape.rank)
try:
dim_size = value_shape[axis]
@@ -1043,9 +1043,10 @@
def ragged_reshape(
- x: ragged_tensor.RaggedOrDense, shape: ragged_shape.RaggedShape
+ x: ragged_tensor.RaggedOrDense,
+ shape: dynamic_ragged_shape.DynamicRaggedShape
) -> ragged_tensor.RaggedOrDense:
- """Reshapes a tensor or ragged tensor to a RaggedShape."""
+ """Reshapes a tensor or ragged tensor to a DynamicRaggedShape."""
if isinstance(x, ragged_tensor.RaggedTensor):
x = x.flat_values
flat_values = array_ops.reshape(x, shape.inner_shape)
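For readers new to the renamed API, a minimal usage sketch for ragged_reshape follows. The hunk above ends before the function's return statement, so the expected result is an assumption based on the reshape-then-repartition pattern the surrounding code suggests, not something asserted by this change.

# Sketch only: reshape a flat tensor into the ragged layout described by a
# DynamicRaggedShape (assumed result; the tail of the function is outside
# this hunk).
from tensorflow.python.framework import constant_op
from tensorflow.python.ops.ragged import dynamic_ragged_shape
from tensorflow.python.ops.ragged import ragged_array_ops

x = constant_op.constant([1, 2, 3, 4, 5, 6])
# One ragged dimension with row lengths 2, 1, 3; the inner_shape is [6].
target = dynamic_ragged_shape.DynamicRaggedShape.from_lengths([3, (2, 1, 3)])
y = ragged_array_ops.ragged_reshape(x, target)
# Expected: a RaggedTensor equal to [[1, 2], [3], [4, 5, 6]].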
@@ -1054,7 +1055,8 @@
def broadcast_to(
- rt_input: ragged_tensor.RaggedOrDense, shape: ragged_shape.RaggedShape
+ rt_input: ragged_tensor.RaggedOrDense,
+ shape: dynamic_ragged_shape.DynamicRaggedShape
) -> ragged_tensor.RaggedOrDense:
"""Broadcasts a potentially ragged tensor to a ragged shape.
@@ -1064,26 +1066,28 @@
Args:
rt_input: The potentially ragged tensor to broadcast.
- shape: A `RaggedShape`
+ shape: A `DynamicRaggedShape`
Returns:
A potentially ragged tensor whose values are taken from
`rt_input`, and whose shape matches `shape`.
"""
- return ragged_shape.broadcast_to(rt_input, shape)
+ return dynamic_ragged_shape.broadcast_to(rt_input, shape)
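A hedged sketch of broadcast_to with the renamed class. Broadcasting a uniform size-1 dimension across a ragged dimension is my own illustration of the docstring above, chosen to mirror the broadcast tests elsewhere in this change rather than copied from them.

# Sketch only: broadcast a dense [3, 1] tensor across a ragged row dimension.
from tensorflow.python.framework import constant_op
from tensorflow.python.ops.ragged import dynamic_ragged_shape

DynamicRaggedShape = dynamic_ragged_shape.DynamicRaggedShape

x = constant_op.constant([[10], [20], [30]])
target = DynamicRaggedShape.from_lengths([3, (1, 2, 1)])
y = dynamic_ragged_shape.broadcast_to(x, target)
# Expected: a RaggedTensor equal to [[10], [20, 20], [30]].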
# TODO(martinz): decide if default should be the underlying row_splits_dtype.
-# tf.shape <- not allowed yet (RaggedShape isnt' public)
-def get_ragged_shape(x: ragged_tensor.RaggedTensor,
- out_type=dtypes.int32) -> ragged_shape.RaggedShape:
- """Returns a RaggedShape for a ragged tensor."""
- return ragged_shape.RaggedShape.from_tensor(x, dtype=out_type)
+# tf.shape <- not allowed yet (DynamicRaggedShape isn't public)
+def get_dynamic_ragged_shape(
+ x: ragged_tensor.RaggedTensor,
+ out_type=dtypes.int32) -> dynamic_ragged_shape.DynamicRaggedShape:
+ """Returns a DynamicRaggedShape for a ragged tensor."""
+ return dynamic_ragged_shape.DynamicRaggedShape.from_tensor(x, dtype=out_type)
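get_dynamic_ragged_shape is a thin wrapper over DynamicRaggedShape.from_tensor; a minimal sketch, with out_type chosen to match a RaggedTensor's default int64 row_splits:

# Sketch only: fetch the DynamicRaggedShape of an existing RaggedTensor.
from tensorflow.python.framework import dtypes
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[1, 2, 3], [4]])
shape = ragged_array_ops.get_dynamic_ragged_shape(rt, out_type=dtypes.int64)
# shape.rank == 2 and shape.dtype == dtypes.int64.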
def broadcast_dynamic_shape(
- shape_x: ragged_shape.RaggedShape,
- shape_y: ragged_shape.RaggedShape) -> ragged_shape.RaggedShape:
+ shape_x: dynamic_ragged_shape.DynamicRaggedShape,
+ shape_y: dynamic_ragged_shape.DynamicRaggedShape
+) -> dynamic_ragged_shape.DynamicRaggedShape:
"""Returns the shape formed by broadcasting two shapes to be compatible.
1. If shape_x and shape_y both have row_partitions, then fail if their dtypes
@@ -1093,18 +1097,18 @@
3. If one has row_partitions, go with that dtype.
Args:
- shape_x: A `RaggedShape`
- shape_y: A `RaggedShape`
+ shape_x: A `DynamicRaggedShape`
+ shape_y: A `DynamicRaggedShape`
Returns:
- A `RaggedShape`.
+ A `DynamicRaggedShape`.
Raises:
ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.
"""
- return ragged_shape.broadcast_dynamic_shape(shape_x, shape_y)
+ return dynamic_ragged_shape.broadcast_dynamic_shape(shape_x, shape_y)
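A minimal sketch of the dense case of broadcast_dynamic_shape; the dtype rules in the docstring apply when both shapes carry row_partitions, and that failure mode is exercised by the tests earlier in this change.

# Sketch only: broadcast two dense shapes, mirroring NumPy-style rules.
from tensorflow.python.ops.ragged import dynamic_ragged_shape
from tensorflow.python.ops.ragged import ragged_array_ops

DynamicRaggedShape = dynamic_ragged_shape.DynamicRaggedShape

a = DynamicRaggedShape.from_lengths([4, 1])
b = DynamicRaggedShape.from_lengths([1, 4])
c = ragged_array_ops.broadcast_dynamic_shape(a, b)
# c describes a dense [4, 4] shape. If both inputs had row_partitions with
# different dtypes, a ValueError would be raised instead (see the tests above).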
-def ones(shape: ragged_shape.RaggedShape,
+def ones(shape: dynamic_ragged_shape.DynamicRaggedShape,
dtype=dtypes.float32,
name=None) -> ragged_tensor.RaggedOrDense:
"""Returns ones shaped like x."""
@@ -1113,7 +1117,7 @@
flat_values, shape.row_partitions)
-def zeros(shape: ragged_shape.RaggedShape,
+def zeros(shape: dynamic_ragged_shape.DynamicRaggedShape,
dtype=dtypes.float32,
name=None) -> ragged_tensor.RaggedOrDense:
"""Returns ones shaped like x."""
diff --git a/tensorflow/python/ops/structured/structured_tensor.py b/tensorflow/python/ops/structured/structured_tensor.py
index 7176eaa..da200ef 100644
--- a/tensorflow/python/ops/structured/structured_tensor.py
+++ b/tensorflow/python/ops/structured/structured_tensor.py
@@ -32,8 +32,8 @@
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops.ragged import dynamic_ragged_shape
from tensorflow.python.ops.ragged import ragged_factory_ops
-from tensorflow.python.ops.ragged import ragged_shape
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import row_partition as row_partition_lib
from tensorflow.python.ops.ragged.row_partition import RowPartition
@@ -1728,8 +1728,8 @@
# pylint:disable=protected-access
-def _ragged_shape_init(fields, shape, nrows, row_partitions):
- """Produce a RaggedShape for StructuredTensor."""
+def _dynamic_ragged_shape_init(fields, shape, nrows, row_partitions):
+ """Produce a DynamicRaggedShape for StructuredTensor."""
assert isinstance(fields, dict), fields
assert isinstance(shape, tensor_shape.TensorShape), shape
assert nrows is None or isinstance(nrows, ops.Tensor), nrows
@@ -1742,7 +1742,7 @@
# TODO(martinz): figure out whether to validate.
dtype = _find_shape_dtype(fields, nrows, row_partitions)
if rank == 0:
- return ragged_shape.RaggedShape._from_inner_shape(
+ return dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape(
array_ops.zeros((0,), dtype=dtype))
if rank == 1:
@@ -1751,9 +1751,8 @@
alt_value = alt_value.value
if alt_value is not None:
nrows = alt_value
- return ragged_shape.RaggedShape._from_inner_shape([nrows], dtype=dtype)
+ return dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape(
+ [nrows], dtype=dtype)
- return ragged_shape.RaggedShape.from_row_partitions(row_partitions,
- dtype=dtype)
-
-
+ return dynamic_ragged_shape.DynamicRaggedShape.from_row_partitions(
+ row_partitions, dtype=dtype)
diff --git a/tensorflow/python/ops/structured/structured_tensor_test.py b/tensorflow/python/ops/structured/structured_tensor_test.py
index 54edc6b..4afd48f 100644
--- a/tensorflow/python/ops/structured/structured_tensor_test.py
+++ b/tensorflow/python/ops/structured/structured_tensor_test.py
@@ -1659,19 +1659,19 @@
spec = structured_tensor.StructuredTensorSpec([None], {})
self.assertEqual(spec.shape.as_list(), [None])
- def test_ragged_shape_init_vector(self):
+ def test_dynamic_ragged_shape_init_vector(self):
x = constant_op.constant([1, 2, 3, 4])
y = constant_op.constant([[1, 2], [3, 4], [5, 6], [7, 8]])
fields = {"x": x, "y": y}
nrows = constant_op.constant(4)
shape = tensor_shape.TensorShape((4,))
row_partitions = ()
- rs = structured_tensor._ragged_shape_init(fields, shape, nrows,
- row_partitions)
+ rs = structured_tensor._dynamic_ragged_shape_init(fields, shape, nrows,
+ row_partitions)
self.assertEqual(
repr(rs._to_tensor_shape()), repr(tensor_shape.TensorShape((4,))))
- def test_ragged_shape_init_scalar(self):
+ def test_dynamic_ragged_shape_init_scalar(self):
x = constant_op.constant([1, 2, 3, 4])
y = constant_op.constant([[1, 2], [3, 4], [5, 6], [7, 8]])
fields = {"x": x, "y": y}
@@ -1679,19 +1679,19 @@
shape = tensor_shape.TensorShape(())
row_partitions = ()
- rs = structured_tensor._ragged_shape_init(fields, shape, nrows,
- row_partitions)
+ rs = structured_tensor._dynamic_ragged_shape_init(fields, shape, nrows,
+ row_partitions)
self.assertEqual(
repr(rs._to_tensor_shape()), repr(tensor_shape.TensorShape(())))
- def test_ragged_shape_init_ragged(self):
+ def test_dynamic_ragged_shape_init_ragged(self):
x = ragged_factory_ops.constant_value([[1, 2, 3], [4]])
fields = {"x": x}
nrows = constant_op.constant(2, dtype=dtypes.int64)
shape = tensor_shape.TensorShape([2, None])
row_partitions = tuple(x._nested_row_partitions)
- rs = structured_tensor._ragged_shape_init(fields, shape, nrows,
- row_partitions)
+ rs = structured_tensor._dynamic_ragged_shape_init(fields, shape, nrows,
+ row_partitions)
self.assertEqual(
repr(rs._to_tensor_shape()), repr(tensor_shape.TensorShape((2, None))))