/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_KERNELS_CONCAT_LIB_H_
#define TENSORFLOW_CORE_KERNELS_CONCAT_LIB_H_

#include <memory>
#include <vector>

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

// Functors to concatenate tensors. These always take rank-2 tensors (i.e.,
// matrices) and concatenate them along axis 1 ("putting them next to each
// other" as opposed to "putting them on top of one another").
//
// Any concatenation of n-dimensional tensors along any axis can be reduced to
// a concatenation of two-dimensional tensors along axis 1: first partition the
// axes of each original tensor into those preceding the concatenation axis and
// the rest, then collapse each of these two sets of axes into a single
// dimension to obtain a matrix, concatenate the resulting matrices along
// axis 1, and finally reshape the result back to the proper shape.
//
// So, for example, when stacking N tensors, reshape each to have shape
// {1, NumElements} and reshape the output matrix to have shape
// {1, N * NumElements} before passing it to this functor.
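//
// For instance, concatenating tensors of shape {2, 3, 5} and {2, 4, 5} along
// axis 1 collapses the inputs to matrices of shape {2, 15} and {2, 20}; their
// axis-1 concatenation is a {2, 35} matrix, which is then reshaped back to
// {2, 7, 5}.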

// Assumes all elements of inputs are nonempty.
// Assumes output is nonempty.
template <typename T>
void ConcatCPU(
    DeviceBase* d,
    const std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>&
        inputs,
    typename TTypes<T, 2>::Matrix* output);
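
// Illustrative usage sketch (not part of this header): a caller flattens every
// input to a rank-2 view with a common number of rows before invoking the
// functor. Variable names below (`ctx`, `inputs`, `output`, `dim0`,
// `output_cols`) are hypothetical; the flattening pattern is roughly:
//
//   std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>> inputs_flat;
//   inputs_flat.reserve(inputs.size());
//   for (const Tensor& in : inputs) {
//     inputs_flat.emplace_back(new typename TTypes<T, 2>::ConstMatrix(
//         in.shaped<T, 2>({dim0, in.NumElements() / dim0})));
//   }
//   auto output_flat = output->shaped<T, 2>({dim0, output_cols});
//   ConcatCPU<T>(ctx->device(), inputs_flat, &output_flat);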
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
    (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
template <typename T>
void ConcatGPU(
    OpKernelContext* c,
    const std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>&
        inputs_flat,
    Tensor* output, typename TTypes<T, 2>::Tensor* output_flat);

// Explicit instantiations in concat_lib_gpu.cc.
#define REGISTER(T)                                                            \
  extern template void ConcatGPU<T>(                                           \
      OpKernelContext * c,                                                     \
      const std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>&  \
          inputs_flat,                                                         \
      Tensor* output, typename TTypes<T, 2>::Tensor* output_flat);
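
// The TF_CALL_* invocations below apply REGISTER to each supported element
// type, producing explicit instantiation declarations: the compiler is told
// that ConcatGPU<T> is instantiated elsewhere (per the comment above, in
// concat_lib_gpu.cc) rather than instantiating it in every including
// translation unit. For example, REGISTER(float) expands, modulo whitespace,
// to:
//
//   extern template void ConcatGPU<float>(
//       OpKernelContext* c,
//       const std::vector<std::unique_ptr<typename TTypes<float, 2>::ConstMatrix>>&
//           inputs_flat,
//       Tensor* output, typename TTypes<float, 2>::Tensor* output_flat);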

TF_CALL_INTEGRAL_TYPES(REGISTER);  // int32 needed for TensorLists.
TF_CALL_bfloat16(REGISTER);
TF_CALL_GPU_ALL_TYPES(REGISTER);
#undef REGISTER
#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_KERNELS_CONCAT_LIB_H_