/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// See docs in ../ops/nn_ops.cc.

#define EIGEN_USE_THREADS

#include "tensorflow/core/kernels/softplus_op.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {

typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;

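// Softplus computes softplus(x) = log(1 + exp(x)) element-wise, a smooth
// approximation to the ReLU activation. The element-wise computation is
// implemented by functor::Softplus (see softplus_op.h); this class only plugs
// the functor into the UnaryElementWiseOp framework.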
template <typename Device, typename T>
class SoftplusOp : public UnaryElementWiseOp<T, SoftplusOp<Device, T>> {
 public:
  explicit SoftplusOp(OpKernelConstruction* context)
      : UnaryElementWiseOp<T, SoftplusOp<Device, T>>(context) {}

  void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {
    functor::Softplus<Device, T> functor;
    functor(context->eigen_device<Device>(), input.flat<T>(),
            output->flat<T>());
  }
};

template <typename Device, typename T>
class SoftplusGradOp
    : public BinaryElementWiseOp<T, SoftplusGradOp<Device, T>> {
 public:
  explicit SoftplusGradOp(OpKernelConstruction* context)
      : BinaryElementWiseOp<T, SoftplusGradOp<Device, T>>(context) {}

  void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
                         const Tensor& a, Tensor* output);

  // INPUTS:
  //   g (gradients): backpropagated gradients
  //   a (inputs): inputs that were passed to SoftplusOp()
  // OUTPUT:
  //   gradients to backprop
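  //
  // Since d/dx softplus(x) = 1 / (1 + exp(-x)) = sigmoid(x), the output is
  // g * sigmoid(a), evaluated element-wise by functor::SoftplusGrad.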
  template <int NDIMS>
  void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a,
               Tensor* output) {
    OperateNoTemplate(context, g, a, output);
  }
};
template <typename Device, typename T>
void SoftplusGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,
                                                  const Tensor& g,
                                                  const Tensor& a,
                                                  Tensor* output) {
  OP_REQUIRES(context, a.IsSameSize(g),
              errors::InvalidArgument("g and a must be the same size"));
  functor::SoftplusGrad<Device, T> functor;
  functor(context->eigen_device<Device>(), g.flat<T>(), a.flat<T>(),
          output->flat<T>());
}

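// Registration of the CPU implementations.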
#define REGISTER_KERNELS(type)                                           \
  REGISTER_KERNEL_BUILDER(                                               \
      Name("Softplus").Device(DEVICE_CPU).TypeConstraint<type>("T"),     \
      SoftplusOp<CPUDevice, type>);                                      \
  REGISTER_KERNEL_BUILDER(                                               \
      Name("SoftplusGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
      SoftplusGradOp<CPUDevice, type>);

TF_CALL_FLOAT_TYPES(REGISTER_KERNELS);
#undef REGISTER_KERNELS

#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
    (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
// Forward declarations of the functor specializations for GPU.
namespace functor {
#define DECLARE_SOFTPLUS_GPU_SPEC(T)                                 \
  template <>                                                         \
  void Softplus<GPUDevice, T>::operator()(                            \
      const GPUDevice& d, typename TTypes<T>::ConstTensor features,   \
      typename TTypes<T>::Tensor activations);                        \
  extern template struct Softplus<GPUDevice, T>;

#define DECLARE_SOFTPLUS_GRAD_GPU_SPEC(T)                             \
  template <>                                                          \
  void SoftplusGrad<GPUDevice, T>::operator()(                         \
      const GPUDevice& d, typename TTypes<T>::ConstTensor gradients,   \
      typename TTypes<T>::ConstTensor features,                        \
      typename TTypes<T>::Tensor backprops);                           \
  extern template struct SoftplusGrad<GPUDevice, T>;

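// When the MLIR-generated GPU kernels are enabled, they provide the forward
// Softplus kernel, so the handwritten specialization is skipped to avoid a
// duplicate registration; SoftplusGrad is always declared (and registered
// below) by this file.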
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED)
TF_CALL_GPU_NUMBER_TYPES(DECLARE_SOFTPLUS_GPU_SPEC);
#endif

TF_CALL_GPU_NUMBER_TYPES(DECLARE_SOFTPLUS_GRAD_GPU_SPEC);
}  // namespace functor

// Registration of the GPU implementations.
#define REGISTER_SOFTPLUS_GPU_KERNELS(type)                          \
  REGISTER_KERNEL_BUILDER(                                           \
      Name("Softplus").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
      SoftplusOp<GPUDevice, type>);

#define REGISTER_SOFTPLUS_GRAD_GPU_KERNELS(type)                         \
  REGISTER_KERNEL_BUILDER(                                               \
      Name("SoftplusGrad").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
      SoftplusGradOp<GPUDevice, type>);

#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED)
TF_CALL_GPU_NUMBER_TYPES(REGISTER_SOFTPLUS_GPU_KERNELS);
#endif

TF_CALL_GPU_NUMBER_TYPES(REGISTER_SOFTPLUS_GRAD_GPU_KERNELS);

#undef REGISTER_SOFTPLUS_GPU_KERNELS
#undef REGISTER_SOFTPLUS_GRAD_GPU_KERNELS

#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM

}  // namespace tensorflow