1 | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. |
2 | |
3 | Licensed under the Apache License, Version 2.0 (the "License"); |
4 | you may not use this file except in compliance with the License. |
5 | You may obtain a copy of the License at |
6 | |
7 | http://www.apache.org/licenses/LICENSE-2.0 |
8 | |
9 | Unless required by applicable law or agreed to in writing, software |
10 | distributed under the License is distributed on an "AS IS" BASIS, |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | See the License for the specific language governing permissions and |
13 | limitations under the License. |
14 | ==============================================================================*/ |
15 | |
16 | #define EIGEN_USE_THREADS |
17 | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM |
18 | #define EIGEN_USE_GPU |
19 | #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM |
20 | |
21 | #include "tensorflow/core/kernels/ragged_tensor_variant.h" |
22 | |
23 | namespace tensorflow { |
24 | |
25 | string RaggedTensorVariant::TypeName() const { return "RaggedTensorVariant" ; } |
26 | |
27 | string RaggedTensorVariant::DebugString() const { |
28 | return absl::StrCat( |
29 | "RaggedTensorVariant(dtype=" , DataTypeString(values_.dtype()), |
30 | ", ragged_rank=" , nested_splits_.size(), ", splits_dtype=" , |
31 | DataTypeString(nested_splits_.empty() ? DT_INVALID |
32 | : nested_splits_.back().dtype())); |
33 | } |
34 | |
35 | void RaggedTensorVariant::Encode(VariantTensorData* data) const { |
36 | data->set_type_name(TypeName()); |
37 | for (const auto& splits : nested_splits_) { |
38 | *data->add_tensors() = splits; |
39 | } |
40 | *data->add_tensors() = values_; |
41 | } |
42 | |
43 | bool RaggedTensorVariant::Decode(const VariantTensorData& data) { |
44 | if (data.tensors_size() < 1) { |
45 | return false; |
46 | } |
47 | nested_splits_.assign(data.tensors().begin(), |
48 | std::prev(data.tensors().end())); |
49 | values_ = data.tensors().back(); |
50 | return true; |
51 | } |
52 | |
53 | namespace { |
54 | |
// Device-copy function registered for RaggedTensorVariant (see the
// REGISTER_RAGGED_TENSOR_VARIANT_COPY invocations below). The dense values
// tensor is moved through the caller-supplied async copy function; the
// splits tensors are transferred by plain Tensor assignment, which shares
// the underlying buffers rather than copying across devices.
// NOTE(review): this presumes the splits tensors are host-resident (or that
// buffer sharing is acceptable for the copy direction) — see the TODO.
Status RaggedTensorVariantDeviceCopy(
    const RaggedTensorVariant& from, RaggedTensorVariant* to,
    const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copy) {
  TF_RETURN_IF_ERROR(copy(from.values(), to->mutable_values()));
  // TODO(b/170415165) Should we use `copy` to move splits from device<->host?
  *to->mutable_nested_splits() = from.nested_splits();
  return OkStatus();
}
63 | |
64 | } // namespace |
65 | |
// Register the elementwise variant ops (zeros_like and add) for ragged
// variants on the CPU device; these are used by gradient/optimizer code
// that manipulates variants generically.
REGISTER_UNARY_VARIANT_UNARY_OP_FUNCTION(
    ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_CPU, RaggedTensorVariant,
    RaggedTensorVariantZerosLike<CPUDevice>);

REGISTER_UNARY_VARIANT_BINARY_OP_FUNCTION(
    ADD_VARIANT_BINARY_OP, DEVICE_CPU, RaggedTensorVariant,
    RaggedTensorVariantBinaryAdd<CPUDevice>);

// Register Decode() under the same type name string that TypeName()/Encode()
// produce, so serialized variants round-trip.
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(RaggedTensorVariant,
                                       "RaggedTensorVariant");

// Register RaggedTensorVariantDeviceCopy (above) for every copy direction.
#define REGISTER_RAGGED_TENSOR_VARIANT_COPY(DIRECTION)   \
  INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION(  \
      RaggedTensorVariant, DIRECTION, RaggedTensorVariantDeviceCopy)

REGISTER_RAGGED_TENSOR_VARIANT_COPY(VariantDeviceCopyDirection::HOST_TO_DEVICE);
REGISTER_RAGGED_TENSOR_VARIANT_COPY(VariantDeviceCopyDirection::DEVICE_TO_HOST);
REGISTER_RAGGED_TENSOR_VARIANT_COPY(
    VariantDeviceCopyDirection::DEVICE_TO_DEVICE);
85 | |
86 | } // namespace tensorflow |
87 | |