// This file is MACHINE GENERATED! Do not edit.

#ifndef TENSORFLOW_CC_OPS_TRAINING_OPS_INTERNAL_H_
#define TENSORFLOW_CC_OPS_TRAINING_OPS_INTERNAL_H_

// This file is MACHINE GENERATED! Do not edit.

#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"

namespace tensorflow {
namespace ops {
namespace internal {
// NOTE: This namespace has internal TensorFlow details that
// are not part of TensorFlow's public API.

/// @defgroup training_ops_internal Training Ops Internal
/// @{

/// Update '*var' according to the AdaMax algorithm.
///
/// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
/// v_t <- max(beta2 * v_{t-1}, abs(g))
/// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
///
/// Args:
/// * scope: A Scope object
/// * var: Should be from a Variable().
/// * m: Should be from a Variable().
/// * v: Should be from a Variable().
/// * beta1_power: Must be a scalar.
/// * lr: Scaling factor. Must be a scalar.
/// * beta1: Momentum factor. Must be a scalar.
/// * beta2: Momentum factor. Must be a scalar.
/// * epsilon: Ridge term. Must be a scalar.
/// * grad: The gradient.
///
/// Optional attributes (see `Attrs`):
/// * use_locking: If `True`, updating of the var, m, and v tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
///
/// Returns:
/// * `Output`: Same as "var".
class ApplyAdaMax {
 public:
  /// Optional attribute setters for ApplyAdaMax
  struct Attrs {
    /// If `True`, updating of the var, m, and v tensors will be protected
    /// by a lock; otherwise the behavior is undefined, but may exhibit less
    /// contention.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs UseLocking(bool x) {
      Attrs ret = *this;
      ret.use_locking_ = x;
      return ret;
    }

    bool use_locking_ = false;
  };
  ApplyAdaMax(const ::tensorflow::Scope& scope, ::tensorflow::Input var,
              ::tensorflow::Input m, ::tensorflow::Input v, ::tensorflow::Input
              beta1_power, ::tensorflow::Input lr, ::tensorflow::Input beta1,
              ::tensorflow::Input beta2, ::tensorflow::Input epsilon,
              ::tensorflow::Input grad);
  ApplyAdaMax(const ::tensorflow::Scope& scope, ::tensorflow::Input var,
              ::tensorflow::Input m, ::tensorflow::Input v, ::tensorflow::Input
              beta1_power, ::tensorflow::Input lr, ::tensorflow::Input beta1,
              ::tensorflow::Input beta2, ::tensorflow::Input epsilon,
              ::tensorflow::Input grad, const ApplyAdaMax::Attrs& attrs);
  operator ::tensorflow::Output() const { return out; }
  operator ::tensorflow::Input() const { return out; }
  ::tensorflow::Node* node() const { return out.node(); }

  static Attrs UseLocking(bool x) {
    return Attrs().UseLocking(x);
  }

  Operation operation;
  ::tensorflow::Output out;
};
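
// Editorial usage sketch, not part of the machine-generated interface above.
// It shows how ApplyAdaMax can be wired into a graph: the variable, slot, and
// gradient inputs are passed through, the scalar inputs (beta1_power, lr,
// beta1, beta2, epsilon) are supplied as float constants via the implicit
// ::tensorflow::Input conversion, and the optional use_locking attribute is
// set through the Attrs helper. The helper name and hyperparameter values are
// illustrative assumptions, not defaults taken from this header.
inline ::tensorflow::Output ApplyAdaMaxExampleSketch(
    const ::tensorflow::Scope& scope, ::tensorflow::Input var,
    ::tensorflow::Input m, ::tensorflow::Input v, ::tensorflow::Input grad) {
  // beta1_power is beta1^t for the current step t; a fixed value is used here
  // purely to illustrate the argument order. Per the Returns note above, the
  // op converts to an Output that is the same as "var" after the update.
  return ApplyAdaMax(scope, var, m, v, /*beta1_power=*/0.9f, /*lr=*/0.001f,
                     /*beta1=*/0.9f, /*beta2=*/0.999f, /*epsilon=*/1e-7f, grad,
                     ApplyAdaMax::UseLocking(true));
}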

/// Update '*var' according to the adagrad scheme.
///
/// accum += grad * grad
/// var -= lr * grad * (1 / (sqrt(accum) + epsilon))
///
/// Args:
/// * scope: A Scope object
/// * var: Should be from a Variable().
/// * accum: Should be from a Variable().
/// * lr: Scaling factor. Must be a scalar.
/// * epsilon: Constant factor. Must be a scalar.
/// * grad: The gradient.
///
/// Optional attributes (see `Attrs`):
/// * use_locking: If `True`, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
///
/// Returns:
/// * `Output`: Same as "var".
class ApplyAdagradV2 {
 public:
  /// Optional attribute setters for ApplyAdagradV2
  struct Attrs {
    /// If `True`, updating of the var and accum tensors will be protected
    /// by a lock; otherwise the behavior is undefined, but may exhibit less
    /// contention.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs UseLocking(bool x) {
      Attrs ret = *this;
      ret.use_locking_ = x;
      return ret;
    }

    /// Defaults to true
    TF_MUST_USE_RESULT Attrs UpdateSlots(bool x) {
      Attrs ret = *this;
      ret.update_slots_ = x;
      return ret;
    }

    bool use_locking_ = false;
    bool update_slots_ = true;
  };
  ApplyAdagradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input var,
                 ::tensorflow::Input accum, ::tensorflow::Input lr,
                 ::tensorflow::Input epsilon, ::tensorflow::Input grad);
  ApplyAdagradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input var,
                 ::tensorflow::Input accum, ::tensorflow::Input lr,
                 ::tensorflow::Input epsilon, ::tensorflow::Input grad, const
                 ApplyAdagradV2::Attrs& attrs);
  operator ::tensorflow::Output() const { return out; }
  operator ::tensorflow::Input() const { return out; }
  ::tensorflow::Node* node() const { return out.node(); }

  static Attrs UseLocking(bool x) {
    return Attrs().UseLocking(x);
  }
  static Attrs UpdateSlots(bool x) {
    return Attrs().UpdateSlots(x);
  }

  Operation operation;
  ::tensorflow::Output out;
};
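
// Editorial usage sketch, not part of the generated interface: the Attrs
// setters return a copy, so several optional attributes can be chained in a
// single expression. The helper name, learning rate, and epsilon below are
// illustrative assumptions, not values taken from this header.
inline ::tensorflow::Output ApplyAdagradV2ExampleSketch(
    const ::tensorflow::Scope& scope, ::tensorflow::Input var,
    ::tensorflow::Input accum, ::tensorflow::Input grad) {
  // Enable locking and skip the accumulator update in one chained Attrs value.
  return ApplyAdagradV2(
      scope, var, accum, /*lr=*/0.01f, /*epsilon=*/1e-7f, grad,
      ApplyAdagradV2::Attrs().UseLocking(true).UpdateSlots(false));
}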

/// Update '*var' according to the AdaMax algorithm.
///
/// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
/// v_t <- max(beta2 * v_{t-1}, abs(g))
/// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
///
/// Args:
/// * scope: A Scope object
/// * var: Should be from a Variable().
/// * m: Should be from a Variable().
/// * v: Should be from a Variable().
/// * beta1_power: Must be a scalar.
/// * lr: Scaling factor. Must be a scalar.
/// * beta1: Momentum factor. Must be a scalar.
/// * beta2: Momentum factor. Must be a scalar.
/// * epsilon: Ridge term. Must be a scalar.
/// * grad: The gradient.
///
/// Optional attributes (see `Attrs`):
/// * use_locking: If `True`, updating of the var, m, and v tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
///
/// Returns:
/// * the created `Operation`
class ResourceApplyAdaMax {
 public:
  /// Optional attribute setters for ResourceApplyAdaMax
  struct Attrs {
    /// If `True`, updating of the var, m, and v tensors will be protected
    /// by a lock; otherwise the behavior is undefined, but may exhibit less
    /// contention.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs UseLocking(bool x) {
      Attrs ret = *this;
      ret.use_locking_ = x;
      return ret;
    }

    bool use_locking_ = false;
  };
  ResourceApplyAdaMax(const ::tensorflow::Scope& scope, ::tensorflow::Input var,
                      ::tensorflow::Input m, ::tensorflow::Input v,
                      ::tensorflow::Input beta1_power, ::tensorflow::Input lr,
                      ::tensorflow::Input beta1, ::tensorflow::Input beta2,
                      ::tensorflow::Input epsilon, ::tensorflow::Input grad);
  ResourceApplyAdaMax(const ::tensorflow::Scope& scope, ::tensorflow::Input var,
                      ::tensorflow::Input m, ::tensorflow::Input v,
                      ::tensorflow::Input beta1_power, ::tensorflow::Input lr,
                      ::tensorflow::Input beta1, ::tensorflow::Input beta2,
                      ::tensorflow::Input epsilon, ::tensorflow::Input grad,
                      const ResourceApplyAdaMax::Attrs& attrs);
  operator ::tensorflow::Operation() const { return operation; }

  static Attrs UseLocking(bool x) {
    return Attrs().UseLocking(x);
  }

  Operation operation;
};
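
// Editorial usage sketch, not part of the generated interface. Unlike
// ApplyAdaMax above, the resource variant does not alias "var" as an Output;
// it only yields the created Operation, which is typically used as a control
// dependency or fetched as a target when the graph is run. The handle inputs
// are assumed to be DT_RESOURCE tensors (e.g. produced elsewhere by a
// VarHandleOp); the helper name and hyperparameter constants are illustrative
// assumptions.
inline ::tensorflow::Operation ResourceApplyAdaMaxExampleSketch(
    const ::tensorflow::Scope& scope, ::tensorflow::Input var_handle,
    ::tensorflow::Input m_handle, ::tensorflow::Input v_handle,
    ::tensorflow::Input grad) {
  ResourceApplyAdaMax update(scope, var_handle, m_handle, v_handle,
                             /*beta1_power=*/0.9f, /*lr=*/0.001f,
                             /*beta1=*/0.9f, /*beta2=*/0.999f,
                             /*epsilon=*/1e-7f, grad);
  // Only the created Operation is exposed; there is no output tensor.
  return update.operation;
}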

/// Update '*var' according to the adagrad scheme.
///
/// accum += grad * grad
/// var -= lr * grad * (1 / (sqrt(accum) + epsilon))
///
/// Args:
/// * scope: A Scope object
/// * var: Should be from a Variable().
/// * accum: Should be from a Variable().
/// * lr: Scaling factor. Must be a scalar.
/// * epsilon: Constant factor. Must be a scalar.
/// * grad: The gradient.
///
/// Optional attributes (see `Attrs`):
/// * use_locking: If `True`, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
///
/// Returns:
/// * the created `Operation`
class ResourceApplyAdagradV2 {
 public:
  /// Optional attribute setters for ResourceApplyAdagradV2
  struct Attrs {
    /// If `True`, updating of the var and accum tensors will be protected
    /// by a lock; otherwise the behavior is undefined, but may exhibit less
    /// contention.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs UseLocking(bool x) {
      Attrs ret = *this;
      ret.use_locking_ = x;
      return ret;
    }

    /// Defaults to true
    TF_MUST_USE_RESULT Attrs UpdateSlots(bool x) {
      Attrs ret = *this;
      ret.update_slots_ = x;
      return ret;
    }

    bool use_locking_ = false;
    bool update_slots_ = true;
  };
  ResourceApplyAdagradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
                         var, ::tensorflow::Input accum, ::tensorflow::Input lr,
                         ::tensorflow::Input epsilon, ::tensorflow::Input grad);
  ResourceApplyAdagradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
                         var, ::tensorflow::Input accum, ::tensorflow::Input lr,
                         ::tensorflow::Input epsilon, ::tensorflow::Input grad,
                         const ResourceApplyAdagradV2::Attrs& attrs);
  operator ::tensorflow::Operation() const { return operation; }

  static Attrs UseLocking(bool x) {
    return Attrs().UseLocking(x);
  }
  static Attrs UpdateSlots(bool x) {
    return Attrs().UpdateSlots(x);
  }

  Operation operation;
};

/// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
///
/// That is, for the rows for which grad is provided, var and accum are updated as follows:
/// accum += grad * grad
/// var -= lr * grad * (1 / (sqrt(accum) + epsilon))
///
/// Args:
/// * scope: A Scope object
/// * var: Should be from a Variable().
/// * accum: Should be from a Variable().
/// * lr: Learning rate. Must be a scalar.
/// * epsilon: Constant factor. Must be a scalar.
/// * grad: The gradient.
/// * indices: A vector of indices into the first dimension of var and accum.
///
/// Optional attributes (see `Attrs`):
/// * use_locking: If `True`, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
///
/// Returns:
/// * the created `Operation`
class ResourceSparseApplyAdagradV2 {
 public:
  /// Optional attribute setters for ResourceSparseApplyAdagradV2
  struct Attrs {
    /// If `True`, updating of the var and accum tensors will be protected
    /// by a lock; otherwise the behavior is undefined, but may exhibit less
    /// contention.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs UseLocking(bool x) {
      Attrs ret = *this;
      ret.use_locking_ = x;
      return ret;
    }

    /// Defaults to true
    TF_MUST_USE_RESULT Attrs UpdateSlots(bool x) {
      Attrs ret = *this;
      ret.update_slots_ = x;
      return ret;
    }

    bool use_locking_ = false;
    bool update_slots_ = true;
  };
  ResourceSparseApplyAdagradV2(const ::tensorflow::Scope& scope,
                               ::tensorflow::Input var, ::tensorflow::Input
                               accum, ::tensorflow::Input lr, ::tensorflow::Input
                               epsilon, ::tensorflow::Input grad,
                               ::tensorflow::Input indices);
  ResourceSparseApplyAdagradV2(const ::tensorflow::Scope& scope,
                               ::tensorflow::Input var, ::tensorflow::Input
                               accum, ::tensorflow::Input lr, ::tensorflow::Input
                               epsilon, ::tensorflow::Input grad,
                               ::tensorflow::Input indices, const
                               ResourceSparseApplyAdagradV2::Attrs& attrs);
  operator ::tensorflow::Operation() const { return operation; }

  static Attrs UseLocking(bool x) {
    return Attrs().UseLocking(x);
  }
  static Attrs UpdateSlots(bool x) {
    return Attrs().UpdateSlots(x);
  }

  Operation operation;
};

/// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
///
/// That is, for the rows for which grad is provided, var and accum are updated as follows:
/// $$accum += grad * grad$$
/// $$var -= lr * grad * (1 / (sqrt(accum) + epsilon))$$
///
/// Args:
/// * scope: A Scope object
/// * var: Should be from a Variable().
/// * accum: Should be from a Variable().
/// * lr: Learning rate. Must be a scalar.
/// * epsilon: Constant factor. Must be a scalar.
/// * grad: The gradient.
/// * indices: A vector of indices into the first dimension of var and accum.
///
/// Optional attributes (see `Attrs`):
/// * use_locking: If `True`, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
///
/// Returns:
/// * `Output`: Same as "var".
class SparseApplyAdagradV2 {
 public:
  /// Optional attribute setters for SparseApplyAdagradV2
  struct Attrs {
    /// If `True`, updating of the var and accum tensors will be protected
    /// by a lock; otherwise the behavior is undefined, but may exhibit less
    /// contention.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs UseLocking(bool x) {
      Attrs ret = *this;
      ret.use_locking_ = x;
      return ret;
    }

    /// Defaults to true
    TF_MUST_USE_RESULT Attrs UpdateSlots(bool x) {
      Attrs ret = *this;
      ret.update_slots_ = x;
      return ret;
    }

    bool use_locking_ = false;
    bool update_slots_ = true;
  };
  SparseApplyAdagradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input var,
                       ::tensorflow::Input accum, ::tensorflow::Input lr,
                       ::tensorflow::Input epsilon, ::tensorflow::Input grad,
                       ::tensorflow::Input indices);
  SparseApplyAdagradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input var,
                       ::tensorflow::Input accum, ::tensorflow::Input lr,
                       ::tensorflow::Input epsilon, ::tensorflow::Input grad,
                       ::tensorflow::Input indices, const
                       SparseApplyAdagradV2::Attrs& attrs);
  operator ::tensorflow::Output() const { return out; }
  operator ::tensorflow::Input() const { return out; }
  ::tensorflow::Node* node() const { return out.node(); }

  static Attrs UseLocking(bool x) {
    return Attrs().UseLocking(x);
  }
  static Attrs UpdateSlots(bool x) {
    return Attrs().UpdateSlots(x);
  }

  Operation operation;
  ::tensorflow::Output out;
};
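
// Editorial usage sketch, not part of the generated interface. For the sparse
// variant, grad carries updates only for the rows named by indices, which
// index into the first dimension of var and accum, so grad has one row per
// index. The helper name, the index values, and the scalar hyperparameters
// below are illustrative assumptions.
inline ::tensorflow::Output SparseApplyAdagradV2ExampleSketch(
    const ::tensorflow::Scope& scope, ::tensorflow::Input var,
    ::tensorflow::Input accum, ::tensorflow::Input grad) {
  // Update rows 0 and 2 of var/accum with the two rows carried by grad; the
  // braced list is converted to a constant indices input.
  return SparseApplyAdagradV2(scope, var, accum, /*lr=*/0.01f,
                              /*epsilon=*/1e-7f, grad, /*indices=*/{0, 2});
}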

/// @}

}  // namespace internal
}  // namespace ops
}  // namespace tensorflow

#endif  // TENSORFLOW_CC_OPS_TRAINING_OPS_INTERNAL_H_