1 | // This file is MACHINE GENERATED! Do not edit. |
2 | |
3 | #ifndef TENSORFLOW_CC_OPS_MATH_OPS_INTERNAL_H_ |
4 | #define TENSORFLOW_CC_OPS_MATH_OPS_INTERNAL_H_ |
5 | |
6 | // This file is MACHINE GENERATED! Do not edit. |
7 | |
8 | #include "tensorflow/cc/framework/ops.h" |
9 | #include "tensorflow/cc/framework/scope.h" |
10 | #include "tensorflow/core/framework/tensor.h" |
11 | #include "tensorflow/core/framework/tensor_shape.h" |
12 | #include "tensorflow/core/framework/types.h" |
13 | #include "tensorflow/core/lib/gtl/array_slice.h" |
14 | |
15 | namespace tensorflow { |
16 | namespace ops { |
17 | namespace internal { |
18 | // NOTE: This namespace has internal TensorFlow details that |
19 | // are not part of TensorFlow's public API. |
20 | |
21 | /// @defgroup math_ops_internal Math Ops Internal |
22 | /// @{ |
23 | |
/// Compute the cumulative log-sum-exp of the tensor `x` along `axis`.
///
/// By default, this op performs an inclusive cumulative log-sum-exp,
/// which means that the first
/// element of the input is identical to the first element of the output:
/// ```python
/// tf.math.cumulative_logsumexp([a, b, c]) # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))]
/// ```
///
/// By setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is
/// performed instead:
/// ```python
/// tf.cumulative_logsumexp([a, b, c], exclusive=True) # => [-inf, a, log(exp(a) + exp(b))]
/// ```
/// Note that the neutral element of the log-sum-exp operation is `-inf`,
/// however, for performance reasons, the minimal value representable by the
/// floating point type is used instead.
///
/// By setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the
/// opposite direction.
///
/// Args:
/// * scope: A Scope object
/// * x: A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`.
/// * axis: A `Tensor` of type `int32` (default: 0). Must be in the range
/// `[-rank(x), rank(x))`.
///
/// Optional attributes (see `Attrs`):
/// * exclusive: If `True`, perform exclusive cumulative log-sum-exp.
/// * reverse: A `bool` (default: False). If `True`, accumulate in the reverse direction.
///
/// Returns:
/// * `Output`: The out tensor.
class CumulativeLogsumexp {
 public:
  /// Optional attribute setters for CumulativeLogsumexp
  struct Attrs {
    /// If `True`, perform exclusive cumulative log-sum-exp.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs Exclusive(bool x) {
      Attrs ret = *this;
      ret.exclusive_ = x;
      return ret;
    }

    /// A `bool` (default: False). If `True`, accumulate in the reverse direction.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs Reverse(bool x) {
      Attrs ret = *this;
      ret.reverse_ = x;
      return ret;
    }

    bool exclusive_ = false;
    bool reverse_ = false;
  };
  CumulativeLogsumexp(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
                    ::tensorflow::Input axis);
  CumulativeLogsumexp(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
                    ::tensorflow::Input axis, const CumulativeLogsumexp::Attrs&
                    attrs);
  operator ::tensorflow::Output() const { return out; }
  operator ::tensorflow::Input() const { return out; }
  ::tensorflow::Node* node() const { return out.node(); }

  // Convenience shorthands that build an Attrs with a single field set.
  static Attrs Exclusive(bool x) {
    return Attrs().Exclusive(x);
  }
  static Attrs Reverse(bool x) {
    return Attrs().Reverse(x);
  }

  Operation operation;
  ::tensorflow::Output out;
};
101 | |
/// Computes the gradient of `igamma(a, x)` wrt `a`.
///
/// Args:
/// * scope: A Scope object
/// * a: The `a` parameter of `igamma(a, x)`.
/// * x: The `x` argument of `igamma(a, x)`.
///
/// Returns:
/// * `Output`: The z tensor.
class IgammaGradA {
 public:
  IgammaGradA(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
            ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
120 | |
/// Computes the gradient for the inverse of `x` wrt its input.
///
/// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
/// is the corresponding input gradient.
///
/// Args:
/// * scope: A Scope object
/// * y: The forward-pass result, `y = 1/x`.
/// * dy: The corresponding input gradient.
///
/// Returns:
/// * `Output`: The z tensor.
class InvGrad {
 public:
  InvGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input y,
        ::tensorflow::Input dy);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
142 | |
/// Generates values in an interval.
///
/// A sequence of `num` evenly-spaced values are generated beginning at `start`.
/// If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
/// so that the last one is exactly `stop`.
///
/// For example:
///
/// ```
/// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
/// ```
///
/// Args:
/// * scope: A Scope object
/// * start: 0-D tensor. First entry in the range.
/// * stop: 0-D tensor. Last entry in the range.
/// * num: 0-D tensor. Number of values to generate.
///
/// Returns:
/// * `Output`: 1-D. The generated values.
class LinSpace {
 public:
  LinSpace(const ::tensorflow::Scope& scope, ::tensorflow::Input start,
         ::tensorflow::Input stop, ::tensorflow::Input num);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
174 | |
/// Computes the gradient for the inverse of `x` wrt its input.
///
/// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
/// is the corresponding input gradient.
///
/// Args:
/// * scope: A Scope object
/// * y: The forward-pass result, `y = 1/x`.
/// * dy: The corresponding input gradient.
///
/// Returns:
/// * `Output`: The z tensor.
class ReciprocalGrad {
 public:
  ReciprocalGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input y,
               ::tensorflow::Input dy);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
196 | |
/// Computes requantization range per channel.
///
/// Args:
/// * scope: A Scope object
/// * input: The original input tensor.
/// * input_min: The minimum value of the input tensor.
/// * input_max: The maximum value of the input tensor.
/// * clip_value_max: The maximum value of the output that needs to be clipped.
/// Example: set this to 6 for Relu6.
///
/// Returns:
/// * `Output` output_min: The minimum value of the final output tensor
/// * `Output` output_max: The maximum value of the final output tensor.
class RequantizationRangePerChannel {
 public:
  // NOTE: No implicit Output conversion is generated because this op
  // produces two outputs; access `output_min` / `output_max` directly.
  RequantizationRangePerChannel(const ::tensorflow::Scope& scope,
                              ::tensorflow::Input input, ::tensorflow::Input
                              input_min, ::tensorflow::Input input_max, float
                              clip_value_max);

  Operation operation;
  ::tensorflow::Output output_min;
  ::tensorflow::Output output_max;
};
221 | |
/// Requantizes input with min and max values known per channel.
///
/// Args:
/// * scope: A Scope object
/// * input: The original input tensor.
/// * input_min: The minimum value of the input tensor.
/// * input_max: The maximum value of the input tensor.
/// * requested_output_min: The minimum value of the output tensor requested.
/// * requested_output_max: The maximum value of the output tensor requested.
///
/// Optional attributes (see `Attrs`):
/// * out_type: The quantized type of output tensor that needs to be converted.
///
/// Returns:
/// * `Output` output: Output tensor.
/// * `Output` output_min: The minimum value of the final output tensor
/// * `Output` output_max: The maximum value of the final output tensor.
class RequantizePerChannel {
 public:
  /// Optional attribute setters for RequantizePerChannel
  struct Attrs {
    /// The quantized type of output tensor that needs to be converted.
    ///
    /// Defaults to DT_QUINT8
    TF_MUST_USE_RESULT Attrs OutType(DataType x) {
      Attrs ret = *this;
      ret.out_type_ = x;
      return ret;
    }

    DataType out_type_ = DT_QUINT8;
  };
  RequantizePerChannel(const ::tensorflow::Scope& scope, ::tensorflow::Input
                     input, ::tensorflow::Input input_min, ::tensorflow::Input
                     input_max, ::tensorflow::Input requested_output_min,
                     ::tensorflow::Input requested_output_max);
  RequantizePerChannel(const ::tensorflow::Scope& scope, ::tensorflow::Input
                     input, ::tensorflow::Input input_min, ::tensorflow::Input
                     input_max, ::tensorflow::Input requested_output_min,
                     ::tensorflow::Input requested_output_max, const
                     RequantizePerChannel::Attrs& attrs);

  // Convenience shorthand that builds an Attrs with `out_type_` set.
  static Attrs OutType(DataType x) {
    return Attrs().OutType(x);
  }

  Operation operation;
  ::tensorflow::Output output;
  ::tensorflow::Output output_min;
  ::tensorflow::Output output_max;
};
273 | |
/// Computes the gradient for the rsqrt of `x` wrt its input.
///
/// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
/// is the corresponding input gradient.
///
/// Args:
/// * scope: A Scope object
/// * y: The forward-pass result, `y = rsqrt(x)`.
/// * dy: The corresponding input gradient.
///
/// Returns:
/// * `Output`: The z tensor.
class RsqrtGrad {
 public:
  RsqrtGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input y,
          ::tensorflow::Input dy);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
295 | |
/// Computes the gradient of the sigmoid of `x` wrt its input.
///
/// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
/// `dy` is the corresponding input gradient.
///
/// Args:
/// * scope: A Scope object
/// * y: The forward-pass result, `y = sigmoid(x)`.
/// * dy: The corresponding input gradient.
///
/// Returns:
/// * `Output`: The z tensor.
class SigmoidGrad {
 public:
  SigmoidGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input y,
            ::tensorflow::Input dy);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
317 | |
/// Generates points from the Sobol sequence.
///
/// Creates a Sobol sequence with `num_results` samples. Each sample has dimension
/// `dim`. Skips the first `skip` samples.
///
/// Args:
/// * scope: A Scope object
/// * dim: Positive scalar `Tensor` representing each sample's dimension.
/// * num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol points to return
/// in the output.
/// * skip: Positive scalar `Tensor` of dtype int32. The number of initial points of the
/// Sobol sequence to skip.
///
/// Optional attributes (see `Attrs`):
/// * dtype: The type of the sample. One of: `float32` or `float64`.
///
/// Returns:
/// * `Output`: `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
class SobolSample {
 public:
  /// Optional attribute setters for SobolSample
  struct Attrs {
    /// The type of the sample. One of: `float32` or `float64`.
    ///
    /// Defaults to DT_FLOAT
    TF_MUST_USE_RESULT Attrs Dtype(DataType x) {
      Attrs ret = *this;
      ret.dtype_ = x;
      return ret;
    }

    DataType dtype_ = DT_FLOAT;
  };
  SobolSample(const ::tensorflow::Scope& scope, ::tensorflow::Input dim,
            ::tensorflow::Input num_results, ::tensorflow::Input skip);
  SobolSample(const ::tensorflow::Scope& scope, ::tensorflow::Input dim,
            ::tensorflow::Input num_results, ::tensorflow::Input skip, const
            SobolSample::Attrs& attrs);
  operator ::tensorflow::Output() const { return samples; }
  operator ::tensorflow::Input() const { return samples; }
  ::tensorflow::Node* node() const { return samples.node(); }

  // Convenience shorthand that builds an Attrs with `dtype_` set.
  static Attrs Dtype(DataType x) {
    return Attrs().Dtype(x);
  }

  Operation operation;
  ::tensorflow::Output samples;
};
367 | |
/// Computes the gradient for the sqrt of `x` wrt its input.
///
/// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
/// is the corresponding input gradient.
///
/// Args:
/// * scope: A Scope object
/// * y: The forward-pass result, `y = sqrt(x)`.
/// * dy: The corresponding input gradient.
///
/// Returns:
/// * `Output`: The z tensor.
class SqrtGrad {
 public:
  SqrtGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input y,
         ::tensorflow::Input dy);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
389 | |
/// Computes the gradient for the tanh of `x` wrt its input.
///
/// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
/// is the corresponding input gradient.
///
/// Args:
/// * scope: A Scope object
/// * y: The forward-pass result, `y = tanh(x)`.
/// * dy: The corresponding input gradient.
///
/// Returns:
/// * `Output`: The z tensor.
class TanhGrad {
 public:
  TanhGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input y,
         ::tensorflow::Input dy);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
411 | |
412 | } // namespace internal |
413 | } // namespace ops |
414 | } // namespace tensorflow |
415 | |
416 | #endif // TENSORFLOW_CC_OPS_MATH_OPS_INTERNAL_H_ |
417 | |