/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_KERNELS_EIGEN_ACTIVATIONS_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_ACTIVATIONS_H_

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"

namespace Eigen {

/** scalar_sigmoid_fast_derivative_op
 * \ingroup CXX11_NeuralNetworks_Module
 * \brief Template functor to compute the fast derivative of a sigmoid
 *
 * Input should be the already-computed sigmoid output y = sigmoid(x);
 * the functor then evaluates the derivative as y * (1 - y).
 *
 * \sa class CwiseUnaryOp, Cwise::sigmoid_fast_derivative()
 */
template <typename T>
struct scalar_sigmoid_fast_derivative_op {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& y) const {
    const T one = T(1);
    return (one - y) * y;
  }

  template <typename Packet>
  inline Packet packetOp(const Packet& y) const {
    const Packet one = internal::pset1<Packet>(1);
    return internal::pmul(internal::psub(one, y), y);
  }
};

namespace internal {
template <typename T>
struct functor_traits<scalar_sigmoid_fast_derivative_op<T> > {
  enum {
    Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost,
    PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasMul &&
                   packet_traits<T>::HasNegate
  };
};
}  // namespace internal
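
// A minimal usage sketch: apply the functor element-wise with unaryExpr to a
// tensor `y` that already holds sigmoid outputs. The tensor name, rank, and
// values below are illustrative assumptions.
//
//   Eigen::Tensor<float, 2> y(4, 4);
//   y.setConstant(0.5f);  // pretend these are sigmoid outputs
//   Eigen::Tensor<float, 2> dy =
//       y.unaryExpr(Eigen::scalar_sigmoid_fast_derivative_op<float>());
//   // Each element of dy equals y * (1 - y) = 0.25f.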

/** scalar_tanh_fast_derivative_op
 * \ingroup CXX11_NeuralNetworks_Module
 * \brief Template functor to compute the fast derivative of a tanh
 *
 * Input should be the already-computed tanh output y = tanh(x);
 * the functor then evaluates the derivative as 1 - y * y.
 *
 * \sa class CwiseUnaryOp, Cwise::tanh_fast_derivative()
 */
template <typename T>
struct scalar_tanh_fast_derivative_op {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& y) const {
    const T one = T(1);
    return one - (y * y);
  }

  template <typename Packet>
  inline Packet packetOp(const Packet& y) const {
    const Packet one = internal::pset1<Packet>(1);
    return internal::psub(one, internal::pmul(y, y));
  }
};

namespace internal {
template <typename T>
struct functor_traits<scalar_tanh_fast_derivative_op<T> > {
  enum {
    Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost,
    PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasMul &&
                   packet_traits<T>::HasNegate
  };
};
}  // namespace internal
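
// A minimal usage sketch: apply the functor element-wise with unaryExpr to a
// tensor `y` that already holds tanh outputs. The tensor name, rank, and
// values below are illustrative assumptions.
//
//   Eigen::Tensor<float, 2> y(4, 4);
//   y.setConstant(0.5f);  // pretend these are tanh outputs
//   Eigen::Tensor<float, 2> dy =
//       y.unaryExpr(Eigen::scalar_tanh_fast_derivative_op<float>());
//   // Each element of dy equals 1 - y * y = 0.75f.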

/**
 * \ingroup CXX11_NeuralNetworks_Module
 * \brief Template functor to clip the magnitude of the first scalar.
 *
 * \sa class CwiseBinaryOp, MatrixBase::Clip
 */
template <typename Scalar>
struct scalar_clip_op {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar
  operator()(const Scalar& a, const Scalar& b) const {
    return numext::mini(numext::maxi(a, -b), b);
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet
  packetOp(const Packet& a, const Packet& b) const {
    return internal::pmin(internal::pmax(a, internal::pnegate(b)), b);
  }
};

namespace internal {
template <typename Scalar>
struct functor_traits<scalar_clip_op<Scalar> > {
  enum {
    Cost = NumTraits<Scalar>::AddCost * 3,
    PacketAccess = packet_traits<Scalar>::HasMax &&
                   packet_traits<Scalar>::HasMin &&
                   packet_traits<Scalar>::HasNegate
  };
};
}  // namespace internal
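
// A minimal usage sketch: clamp each element a_i of `a` to [-b_i, b_i] with
// binaryExpr, where `b` holds per-element magnitude bounds. The tensor names
// and values below are illustrative assumptions.
//
//   Eigen::Tensor<float, 1> a(3);
//   Eigen::Tensor<float, 1> b(3);
//   a.setValues({-2.0f, 0.5f, 3.0f});
//   b.setValues({1.0f, 1.0f, 1.0f});
//   Eigen::Tensor<float, 1> clipped =
//       a.binaryExpr(b, Eigen::scalar_clip_op<float>());
//   // clipped holds {-1.0f, 0.5f, 1.0f}.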

}  // end namespace Eigen

#endif  // TENSORFLOW_CORE_KERNELS_EIGEN_ACTIVATIONS_H_