1 | // This file is MACHINE GENERATED! Do not edit. |
2 | |
3 | #ifndef TENSORFLOW_CC_OPS_TRAINING_OPS_H_ |
4 | #define TENSORFLOW_CC_OPS_TRAINING_OPS_H_ |
5 | |
6 | // This file is MACHINE GENERATED! Do not edit. |
7 | |
8 | #include "tensorflow/cc/framework/ops.h" |
9 | #include "tensorflow/cc/framework/scope.h" |
10 | #include "tensorflow/core/framework/tensor.h" |
11 | #include "tensorflow/core/framework/tensor_shape.h" |
12 | #include "tensorflow/core/framework/types.h" |
13 | #include "tensorflow/core/lib/gtl/array_slice.h" |
14 | |
15 | namespace tensorflow { |
16 | namespace ops { |
17 | |
18 | /// @defgroup training_ops Training Ops |
19 | /// @{ |
20 | |
21 | /// Update '*var' according to the adadelta scheme. |
22 | /// |
/// accum = rho * accum + (1 - rho) * grad.square();
/// update = (accum_update + epsilon).sqrt() * (accum + epsilon).rsqrt() * grad;
/// accum_update = rho * accum_update + (1 - rho) * update.square();
26 | /// var -= update; |
27 | /// |
28 | /// Args: |
29 | /// * scope: A Scope object |
30 | /// * var: Should be from a Variable(). |
31 | /// * accum: Should be from a Variable(). |
32 | /// * accum_update: Should be from a Variable(). |
33 | /// * lr: Scaling factor. Must be a scalar. |
34 | /// * rho: Decay factor. Must be a scalar. |
35 | /// * epsilon: Constant factor. Must be a scalar. |
36 | /// * grad: The gradient. |
37 | /// |
38 | /// Optional attributes (see `Attrs`): |
/// * use_locking: If True, updating of the var, accum and accum_update tensors will be protected by
40 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
41 | /// |
42 | /// Returns: |
43 | /// * `Output`: Same as "var". |
44 | class ApplyAdadelta { |
45 | public: |
46 | /// Optional attribute setters for ApplyAdadelta |
47 | struct Attrs { |
    /// If True, updating of the var, accum and accum_update tensors will be protected by
49 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
50 | /// |
51 | /// Defaults to false |
52 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
53 | Attrs ret = *this; |
54 | ret.use_locking_ = x; |
55 | return ret; |
56 | } |
57 | |
58 | bool use_locking_ = false; |
59 | }; |
60 | ApplyAdadelta(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
61 | ::tensorflow::Input accum, ::tensorflow::Input accum_update, |
62 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
63 | ::tensorflow::Input epsilon, ::tensorflow::Input grad); |
64 | ApplyAdadelta(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
65 | ::tensorflow::Input accum, ::tensorflow::Input accum_update, |
66 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
67 | ::tensorflow::Input epsilon, ::tensorflow::Input grad, const |
68 | ApplyAdadelta::Attrs& attrs); |
69 | operator ::tensorflow::Output() const { return out; } |
70 | operator ::tensorflow::Input() const { return out; } |
71 | ::tensorflow::Node* node() const { return out.node(); } |
72 | |
73 | static Attrs UseLocking(bool x) { |
74 | return Attrs().UseLocking(x); |
75 | } |
76 | |
77 | Operation operation; |
78 | ::tensorflow::Output out; |
79 | }; |
80 | |
81 | /// Update '*var' according to the adagrad scheme. |
82 | /// |
83 | /// accum += grad * grad |
84 | /// var -= lr * grad * (1 / sqrt(accum)) |
85 | /// |
86 | /// Args: |
87 | /// * scope: A Scope object |
88 | /// * var: Should be from a Variable(). |
89 | /// * accum: Should be from a Variable(). |
90 | /// * lr: Scaling factor. Must be a scalar. |
91 | /// * grad: The gradient. |
92 | /// |
93 | /// Optional attributes (see `Attrs`): |
94 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
95 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
96 | /// contention. |
97 | /// |
98 | /// Returns: |
99 | /// * `Output`: Same as "var". |
100 | class ApplyAdagrad { |
101 | public: |
102 | /// Optional attribute setters for ApplyAdagrad |
103 | struct Attrs { |
104 | /// If `True`, updating of the var and accum tensors will be protected |
105 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
106 | /// contention. |
107 | /// |
108 | /// Defaults to false |
109 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
110 | Attrs ret = *this; |
111 | ret.use_locking_ = x; |
112 | return ret; |
113 | } |
114 | |
115 | /// Defaults to true |
116 | TF_MUST_USE_RESULT Attrs UpdateSlots(bool x) { |
117 | Attrs ret = *this; |
118 | ret.update_slots_ = x; |
119 | return ret; |
120 | } |
121 | |
122 | bool use_locking_ = false; |
123 | bool update_slots_ = true; |
124 | }; |
125 | ApplyAdagrad(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
126 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
127 | ::tensorflow::Input grad); |
128 | ApplyAdagrad(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
129 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
130 | ::tensorflow::Input grad, const ApplyAdagrad::Attrs& attrs); |
131 | operator ::tensorflow::Output() const { return out; } |
132 | operator ::tensorflow::Input() const { return out; } |
133 | ::tensorflow::Node* node() const { return out.node(); } |
134 | |
135 | static Attrs UseLocking(bool x) { |
136 | return Attrs().UseLocking(x); |
137 | } |
138 | static Attrs UpdateSlots(bool x) { |
139 | return Attrs().UpdateSlots(x); |
140 | } |
141 | |
142 | Operation operation; |
143 | ::tensorflow::Output out; |
144 | }; |
145 | |
146 | /// Update '*var' according to the proximal adagrad scheme. |
147 | /// |
148 | /// Args: |
149 | /// * scope: A Scope object |
150 | /// * var: Should be from a Variable(). |
151 | /// * gradient_accumulator: Should be from a Variable(). |
152 | /// * gradient_squared_accumulator: Should be from a Variable(). |
153 | /// * grad: The gradient. |
154 | /// * lr: Scaling factor. Must be a scalar. |
155 | /// * l1: L1 regularization. Must be a scalar. |
156 | /// * l2: L2 regularization. Must be a scalar. |
157 | /// * global_step: Training step number. Must be a scalar. |
158 | /// |
159 | /// Optional attributes (see `Attrs`): |
160 | /// * use_locking: If True, updating of the var and accum tensors will be protected by |
161 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
162 | /// |
163 | /// Returns: |
164 | /// * `Output`: Same as "var". |
165 | class ApplyAdagradDA { |
166 | public: |
167 | /// Optional attribute setters for ApplyAdagradDA |
168 | struct Attrs { |
169 | /// If True, updating of the var and accum tensors will be protected by |
170 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
171 | /// |
172 | /// Defaults to false |
173 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
174 | Attrs ret = *this; |
175 | ret.use_locking_ = x; |
176 | return ret; |
177 | } |
178 | |
179 | bool use_locking_ = false; |
180 | }; |
181 | ApplyAdagradDA(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
182 | ::tensorflow::Input gradient_accumulator, ::tensorflow::Input |
183 | gradient_squared_accumulator, ::tensorflow::Input grad, |
184 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
185 | ::tensorflow::Input l2, ::tensorflow::Input global_step); |
186 | ApplyAdagradDA(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
187 | ::tensorflow::Input gradient_accumulator, ::tensorflow::Input |
188 | gradient_squared_accumulator, ::tensorflow::Input grad, |
189 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
190 | ::tensorflow::Input l2, ::tensorflow::Input global_step, const |
191 | ApplyAdagradDA::Attrs& attrs); |
192 | operator ::tensorflow::Output() const { return out; } |
193 | operator ::tensorflow::Input() const { return out; } |
194 | ::tensorflow::Node* node() const { return out.node(); } |
195 | |
196 | static Attrs UseLocking(bool x) { |
197 | return Attrs().UseLocking(x); |
198 | } |
199 | |
200 | Operation operation; |
201 | ::tensorflow::Output out; |
202 | }; |
203 | |
204 | /// Update '*var' according to the Adam algorithm. |
205 | /// |
206 | /// $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ |
207 | /// $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ |
208 | /// $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ |
209 | /// $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ |
210 | /// |
211 | /// Args: |
212 | /// * scope: A Scope object |
213 | /// * var: Should be from a Variable(). |
214 | /// * m: Should be from a Variable(). |
215 | /// * v: Should be from a Variable(). |
216 | /// * beta1_power: Must be a scalar. |
217 | /// * beta2_power: Must be a scalar. |
218 | /// * lr: Scaling factor. Must be a scalar. |
219 | /// * beta1: Momentum factor. Must be a scalar. |
220 | /// * beta2: Momentum factor. Must be a scalar. |
221 | /// * epsilon: Ridge term. Must be a scalar. |
222 | /// * grad: The gradient. |
223 | /// |
224 | /// Optional attributes (see `Attrs`): |
225 | /// * use_locking: If `True`, updating of the var, m, and v tensors will be protected |
226 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
227 | /// contention. |
228 | /// * use_nesterov: If `True`, uses the nesterov update. |
229 | /// |
230 | /// Returns: |
231 | /// * `Output`: Same as "var". |
232 | class ApplyAdam { |
233 | public: |
234 | /// Optional attribute setters for ApplyAdam |
235 | struct Attrs { |
236 | /// If `True`, updating of the var, m, and v tensors will be protected |
237 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
238 | /// contention. |
239 | /// |
240 | /// Defaults to false |
241 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
242 | Attrs ret = *this; |
243 | ret.use_locking_ = x; |
244 | return ret; |
245 | } |
246 | |
247 | /// If `True`, uses the nesterov update. |
248 | /// |
249 | /// Defaults to false |
250 | TF_MUST_USE_RESULT Attrs UseNesterov(bool x) { |
251 | Attrs ret = *this; |
252 | ret.use_nesterov_ = x; |
253 | return ret; |
254 | } |
255 | |
256 | bool use_locking_ = false; |
257 | bool use_nesterov_ = false; |
258 | }; |
259 | ApplyAdam(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
260 | ::tensorflow::Input m, ::tensorflow::Input v, ::tensorflow::Input |
261 | beta1_power, ::tensorflow::Input beta2_power, ::tensorflow::Input lr, |
262 | ::tensorflow::Input beta1, ::tensorflow::Input beta2, |
263 | ::tensorflow::Input epsilon, ::tensorflow::Input grad); |
264 | ApplyAdam(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
265 | ::tensorflow::Input m, ::tensorflow::Input v, ::tensorflow::Input |
266 | beta1_power, ::tensorflow::Input beta2_power, ::tensorflow::Input lr, |
267 | ::tensorflow::Input beta1, ::tensorflow::Input beta2, |
268 | ::tensorflow::Input epsilon, ::tensorflow::Input grad, const |
269 | ApplyAdam::Attrs& attrs); |
270 | operator ::tensorflow::Output() const { return out; } |
271 | operator ::tensorflow::Input() const { return out; } |
272 | ::tensorflow::Node* node() const { return out.node(); } |
273 | |
274 | static Attrs UseLocking(bool x) { |
275 | return Attrs().UseLocking(x); |
276 | } |
277 | static Attrs UseNesterov(bool x) { |
278 | return Attrs().UseNesterov(x); |
279 | } |
280 | |
281 | Operation operation; |
282 | ::tensorflow::Output out; |
283 | }; |
284 | |
285 | /// Update '*var' according to the AddSign update. |
286 | /// |
287 | /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g |
/// update <- (alpha + sign_decay * sign(g) * sign(m)) * g
289 | /// variable <- variable - lr_t * update |
290 | /// |
291 | /// Args: |
292 | /// * scope: A Scope object |
293 | /// * var: Should be from a Variable(). |
294 | /// * m: Should be from a Variable(). |
295 | /// * lr: Scaling factor. Must be a scalar. |
296 | /// * alpha: Must be a scalar. |
297 | /// * sign_decay: Must be a scalar. |
298 | /// * beta: Must be a scalar. |
299 | /// * grad: The gradient. |
300 | /// |
301 | /// Optional attributes (see `Attrs`): |
302 | /// * use_locking: If `True`, updating of the var and m tensors is |
303 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
304 | /// contention. |
305 | /// |
306 | /// Returns: |
307 | /// * `Output`: Same as "var". |
308 | class ApplyAddSign { |
309 | public: |
310 | /// Optional attribute setters for ApplyAddSign |
311 | struct Attrs { |
312 | /// If `True`, updating of the var and m tensors is |
313 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
314 | /// contention. |
315 | /// |
316 | /// Defaults to false |
317 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
318 | Attrs ret = *this; |
319 | ret.use_locking_ = x; |
320 | return ret; |
321 | } |
322 | |
323 | bool use_locking_ = false; |
324 | }; |
325 | ApplyAddSign(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
326 | ::tensorflow::Input m, ::tensorflow::Input lr, ::tensorflow::Input |
327 | alpha, ::tensorflow::Input sign_decay, ::tensorflow::Input beta, |
328 | ::tensorflow::Input grad); |
329 | ApplyAddSign(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
330 | ::tensorflow::Input m, ::tensorflow::Input lr, ::tensorflow::Input |
331 | alpha, ::tensorflow::Input sign_decay, ::tensorflow::Input beta, |
332 | ::tensorflow::Input grad, const ApplyAddSign::Attrs& attrs); |
333 | operator ::tensorflow::Output() const { return out; } |
334 | operator ::tensorflow::Input() const { return out; } |
335 | ::tensorflow::Node* node() const { return out.node(); } |
336 | |
337 | static Attrs UseLocking(bool x) { |
338 | return Attrs().UseLocking(x); |
339 | } |
340 | |
341 | Operation operation; |
342 | ::tensorflow::Output out; |
343 | }; |
344 | |
345 | /// Update '*var' according to the centered RMSProp algorithm. |
346 | /// |
347 | /// The centered RMSProp algorithm uses an estimate of the centered second moment |
348 | /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which |
349 | /// uses the (uncentered) second moment. This often helps with training, but is |
350 | /// slightly more expensive in terms of computation and memory. |
351 | /// |
/// Note that in the dense implementation of this algorithm, mg, ms, and mom
/// will update even if the grad is zero, but in the sparse implementation, mg,
/// ms, and mom will not update in iterations during which the grad is zero.
355 | /// |
356 | /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 |
357 | /// mean_grad = decay * mean_grad + (1-decay) * gradient |
358 | /// |
359 | /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) |
360 | /// |
361 | /// mg <- rho * mg_{t-1} + (1-rho) * grad |
362 | /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad |
363 | /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) |
364 | /// var <- var - mom |
365 | /// |
366 | /// Args: |
367 | /// * scope: A Scope object |
368 | /// * var: Should be from a Variable(). |
369 | /// * mg: Should be from a Variable(). |
370 | /// * ms: Should be from a Variable(). |
371 | /// * mom: Should be from a Variable(). |
372 | /// * lr: Scaling factor. Must be a scalar. |
373 | /// * rho: Decay rate. Must be a scalar. |
374 | /// * momentum: Momentum Scale. Must be a scalar. |
375 | /// * epsilon: Ridge term. Must be a scalar. |
376 | /// * grad: The gradient. |
377 | /// |
378 | /// Optional attributes (see `Attrs`): |
379 | /// * use_locking: If `True`, updating of the var, mg, ms, and mom tensors is |
380 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
381 | /// contention. |
382 | /// |
383 | /// Returns: |
384 | /// * `Output`: Same as "var". |
385 | class ApplyCenteredRMSProp { |
386 | public: |
387 | /// Optional attribute setters for ApplyCenteredRMSProp |
388 | struct Attrs { |
389 | /// If `True`, updating of the var, mg, ms, and mom tensors is |
390 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
391 | /// contention. |
392 | /// |
393 | /// Defaults to false |
394 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
395 | Attrs ret = *this; |
396 | ret.use_locking_ = x; |
397 | return ret; |
398 | } |
399 | |
400 | bool use_locking_ = false; |
401 | }; |
402 | ApplyCenteredRMSProp(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
403 | ::tensorflow::Input mg, ::tensorflow::Input ms, |
404 | ::tensorflow::Input mom, ::tensorflow::Input lr, |
405 | ::tensorflow::Input rho, ::tensorflow::Input momentum, |
406 | ::tensorflow::Input epsilon, ::tensorflow::Input grad); |
407 | ApplyCenteredRMSProp(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
408 | ::tensorflow::Input mg, ::tensorflow::Input ms, |
409 | ::tensorflow::Input mom, ::tensorflow::Input lr, |
410 | ::tensorflow::Input rho, ::tensorflow::Input momentum, |
411 | ::tensorflow::Input epsilon, ::tensorflow::Input grad, |
412 | const ApplyCenteredRMSProp::Attrs& attrs); |
413 | operator ::tensorflow::Output() const { return out; } |
414 | operator ::tensorflow::Input() const { return out; } |
415 | ::tensorflow::Node* node() const { return out.node(); } |
416 | |
417 | static Attrs UseLocking(bool x) { |
418 | return Attrs().UseLocking(x); |
419 | } |
420 | |
421 | Operation operation; |
422 | ::tensorflow::Output out; |
423 | }; |
424 | |
425 | /// Update '*var' according to the Ftrl-proximal scheme. |
426 | /// |
427 | /// accum_new = accum + grad * grad |
428 | /// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var |
429 | /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 |
430 | /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 |
431 | /// accum = accum_new |
432 | /// |
433 | /// Args: |
434 | /// * scope: A Scope object |
435 | /// * var: Should be from a Variable(). |
436 | /// * accum: Should be from a Variable(). |
437 | /// * linear: Should be from a Variable(). |
438 | /// * grad: The gradient. |
439 | /// * lr: Scaling factor. Must be a scalar. |
440 | /// * l1: L1 regularization. Must be a scalar. |
441 | /// * l2: L2 regularization. Must be a scalar. |
442 | /// * lr_power: Scaling factor. Must be a scalar. |
443 | /// |
444 | /// Optional attributes (see `Attrs`): |
445 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
446 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
447 | /// contention. |
448 | /// |
449 | /// Returns: |
450 | /// * `Output`: Same as "var". |
451 | class ApplyFtrl { |
452 | public: |
453 | /// Optional attribute setters for ApplyFtrl |
454 | struct Attrs { |
455 | /// If `True`, updating of the var and accum tensors will be protected |
456 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
457 | /// contention. |
458 | /// |
459 | /// Defaults to false |
460 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
461 | Attrs ret = *this; |
462 | ret.use_locking_ = x; |
463 | return ret; |
464 | } |
465 | |
466 | /// Defaults to false |
467 | TF_MUST_USE_RESULT Attrs MultiplyLinearByLr(bool x) { |
468 | Attrs ret = *this; |
469 | ret.multiply_linear_by_lr_ = x; |
470 | return ret; |
471 | } |
472 | |
473 | bool use_locking_ = false; |
474 | bool multiply_linear_by_lr_ = false; |
475 | }; |
476 | ApplyFtrl(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
477 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
478 | ::tensorflow::Input grad, ::tensorflow::Input lr, ::tensorflow::Input |
479 | l1, ::tensorflow::Input l2, ::tensorflow::Input lr_power); |
480 | ApplyFtrl(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
481 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
482 | ::tensorflow::Input grad, ::tensorflow::Input lr, ::tensorflow::Input |
483 | l1, ::tensorflow::Input l2, ::tensorflow::Input lr_power, const |
484 | ApplyFtrl::Attrs& attrs); |
485 | operator ::tensorflow::Output() const { return out; } |
486 | operator ::tensorflow::Input() const { return out; } |
487 | ::tensorflow::Node* node() const { return out.node(); } |
488 | |
489 | static Attrs UseLocking(bool x) { |
490 | return Attrs().UseLocking(x); |
491 | } |
492 | static Attrs MultiplyLinearByLr(bool x) { |
493 | return Attrs().MultiplyLinearByLr(x); |
494 | } |
495 | |
496 | Operation operation; |
497 | ::tensorflow::Output out; |
498 | }; |
499 | |
500 | /// Update '*var' according to the Ftrl-proximal scheme. |
501 | /// |
502 | /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var |
503 | /// accum_new = accum + grad * grad |
504 | /// linear += grad_with_shrinkage - |
505 | /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var |
506 | /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 |
507 | /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 |
508 | /// accum = accum_new |
509 | /// |
510 | /// Args: |
511 | /// * scope: A Scope object |
512 | /// * var: Should be from a Variable(). |
513 | /// * accum: Should be from a Variable(). |
514 | /// * linear: Should be from a Variable(). |
515 | /// * grad: The gradient. |
516 | /// * lr: Scaling factor. Must be a scalar. |
517 | /// * l1: L1 regularization. Must be a scalar. |
518 | /// * l2: L2 shrinkage regularization. Must be a scalar. |
519 | /// * lr_power: Scaling factor. Must be a scalar. |
520 | /// |
521 | /// Optional attributes (see `Attrs`): |
522 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
523 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
524 | /// contention. |
525 | /// |
526 | /// Returns: |
527 | /// * `Output`: Same as "var". |
528 | class ApplyFtrlV2 { |
529 | public: |
530 | /// Optional attribute setters for ApplyFtrlV2 |
531 | struct Attrs { |
532 | /// If `True`, updating of the var and accum tensors will be protected |
533 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
534 | /// contention. |
535 | /// |
536 | /// Defaults to false |
537 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
538 | Attrs ret = *this; |
539 | ret.use_locking_ = x; |
540 | return ret; |
541 | } |
542 | |
543 | /// Defaults to false |
544 | TF_MUST_USE_RESULT Attrs MultiplyLinearByLr(bool x) { |
545 | Attrs ret = *this; |
546 | ret.multiply_linear_by_lr_ = x; |
547 | return ret; |
548 | } |
549 | |
550 | bool use_locking_ = false; |
551 | bool multiply_linear_by_lr_ = false; |
552 | }; |
553 | ApplyFtrlV2(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
554 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
555 | ::tensorflow::Input grad, ::tensorflow::Input lr, |
556 | ::tensorflow::Input l1, ::tensorflow::Input l2, ::tensorflow::Input |
557 | l2_shrinkage, ::tensorflow::Input lr_power); |
558 | ApplyFtrlV2(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
559 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
560 | ::tensorflow::Input grad, ::tensorflow::Input lr, |
561 | ::tensorflow::Input l1, ::tensorflow::Input l2, ::tensorflow::Input |
562 | l2_shrinkage, ::tensorflow::Input lr_power, const |
563 | ApplyFtrlV2::Attrs& attrs); |
564 | operator ::tensorflow::Output() const { return out; } |
565 | operator ::tensorflow::Input() const { return out; } |
566 | ::tensorflow::Node* node() const { return out.node(); } |
567 | |
568 | static Attrs UseLocking(bool x) { |
569 | return Attrs().UseLocking(x); |
570 | } |
571 | static Attrs MultiplyLinearByLr(bool x) { |
572 | return Attrs().MultiplyLinearByLr(x); |
573 | } |
574 | |
575 | Operation operation; |
576 | ::tensorflow::Output out; |
577 | }; |
578 | |
579 | /// Update '*var' by subtracting 'alpha' * 'delta' from it. |
580 | /// |
581 | /// Args: |
582 | /// * scope: A Scope object |
583 | /// * var: Should be from a Variable(). |
584 | /// * alpha: Scaling factor. Must be a scalar. |
585 | /// * delta: The change. |
586 | /// |
587 | /// Optional attributes (see `Attrs`): |
588 | /// * use_locking: If `True`, the subtraction will be protected by a lock; |
589 | /// otherwise the behavior is undefined, but may exhibit less contention. |
590 | /// |
591 | /// Returns: |
592 | /// * `Output`: Same as "var". |
593 | class ApplyGradientDescent { |
594 | public: |
595 | /// Optional attribute setters for ApplyGradientDescent |
596 | struct Attrs { |
597 | /// If `True`, the subtraction will be protected by a lock; |
598 | /// otherwise the behavior is undefined, but may exhibit less contention. |
599 | /// |
600 | /// Defaults to false |
601 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
602 | Attrs ret = *this; |
603 | ret.use_locking_ = x; |
604 | return ret; |
605 | } |
606 | |
607 | bool use_locking_ = false; |
608 | }; |
609 | ApplyGradientDescent(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
610 | ::tensorflow::Input alpha, ::tensorflow::Input delta); |
611 | ApplyGradientDescent(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
612 | ::tensorflow::Input alpha, ::tensorflow::Input delta, |
613 | const ApplyGradientDescent::Attrs& attrs); |
614 | operator ::tensorflow::Output() const { return out; } |
615 | operator ::tensorflow::Input() const { return out; } |
616 | ::tensorflow::Node* node() const { return out.node(); } |
617 | |
618 | static Attrs UseLocking(bool x) { |
619 | return Attrs().UseLocking(x); |
620 | } |
621 | |
622 | Operation operation; |
623 | ::tensorflow::Output out; |
624 | }; |
625 | |
626 | /// Update '*var' according to the momentum scheme. |
627 | /// |
628 | /// Set use_nesterov = True if you want to use Nesterov momentum. |
629 | /// |
630 | /// accum = accum * momentum + grad |
631 | /// var -= lr * accum |
632 | /// |
633 | /// Args: |
634 | /// * scope: A Scope object |
635 | /// * var: Should be from a Variable(). |
636 | /// * accum: Should be from a Variable(). |
637 | /// * lr: Scaling factor. Must be a scalar. |
638 | /// * grad: The gradient. |
639 | /// * momentum: Momentum. Must be a scalar. |
640 | /// |
641 | /// Optional attributes (see `Attrs`): |
642 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
643 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
644 | /// contention. |
645 | /// * use_nesterov: If `True`, the tensor passed to compute grad will be |
646 | /// var - lr * momentum * accum, so in the end, the var you get is actually |
647 | /// var - lr * momentum * accum. |
648 | /// |
649 | /// Returns: |
650 | /// * `Output`: Same as "var". |
651 | class ApplyMomentum { |
652 | public: |
653 | /// Optional attribute setters for ApplyMomentum |
654 | struct Attrs { |
655 | /// If `True`, updating of the var and accum tensors will be protected |
656 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
657 | /// contention. |
658 | /// |
659 | /// Defaults to false |
660 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
661 | Attrs ret = *this; |
662 | ret.use_locking_ = x; |
663 | return ret; |
664 | } |
665 | |
666 | /// If `True`, the tensor passed to compute grad will be |
667 | /// var - lr * momentum * accum, so in the end, the var you get is actually |
668 | /// var - lr * momentum * accum. |
669 | /// |
670 | /// Defaults to false |
671 | TF_MUST_USE_RESULT Attrs UseNesterov(bool x) { |
672 | Attrs ret = *this; |
673 | ret.use_nesterov_ = x; |
674 | return ret; |
675 | } |
676 | |
677 | bool use_locking_ = false; |
678 | bool use_nesterov_ = false; |
679 | }; |
680 | ApplyMomentum(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
681 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
682 | ::tensorflow::Input grad, ::tensorflow::Input momentum); |
683 | ApplyMomentum(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
684 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
685 | ::tensorflow::Input grad, ::tensorflow::Input momentum, const |
686 | ApplyMomentum::Attrs& attrs); |
687 | operator ::tensorflow::Output() const { return out; } |
688 | operator ::tensorflow::Input() const { return out; } |
689 | ::tensorflow::Node* node() const { return out.node(); } |
690 | |
691 | static Attrs UseLocking(bool x) { |
692 | return Attrs().UseLocking(x); |
693 | } |
694 | static Attrs UseNesterov(bool x) { |
695 | return Attrs().UseNesterov(x); |
696 | } |
697 | |
698 | Operation operation; |
699 | ::tensorflow::Output out; |
700 | }; |
701 | |
/// Update '*var' according to the PowerSign update.
703 | /// |
704 | /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g |
705 | /// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g |
706 | /// variable <- variable - lr_t * update |
707 | /// |
708 | /// Args: |
709 | /// * scope: A Scope object |
710 | /// * var: Should be from a Variable(). |
711 | /// * m: Should be from a Variable(). |
712 | /// * lr: Scaling factor. Must be a scalar. |
713 | /// * logbase: Must be a scalar. |
714 | /// * sign_decay: Must be a scalar. |
715 | /// * beta: Must be a scalar. |
716 | /// * grad: The gradient. |
717 | /// |
718 | /// Optional attributes (see `Attrs`): |
719 | /// * use_locking: If `True`, updating of the var and m tensors is |
720 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
721 | /// contention. |
722 | /// |
723 | /// Returns: |
724 | /// * `Output`: Same as "var". |
725 | class ApplyPowerSign { |
726 | public: |
727 | /// Optional attribute setters for ApplyPowerSign |
728 | struct Attrs { |
729 | /// If `True`, updating of the var and m tensors is |
730 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
731 | /// contention. |
732 | /// |
733 | /// Defaults to false |
734 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
735 | Attrs ret = *this; |
736 | ret.use_locking_ = x; |
737 | return ret; |
738 | } |
739 | |
740 | bool use_locking_ = false; |
741 | }; |
742 | ApplyPowerSign(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
743 | ::tensorflow::Input m, ::tensorflow::Input lr, |
744 | ::tensorflow::Input logbase, ::tensorflow::Input sign_decay, |
745 | ::tensorflow::Input beta, ::tensorflow::Input grad); |
746 | ApplyPowerSign(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
747 | ::tensorflow::Input m, ::tensorflow::Input lr, |
748 | ::tensorflow::Input logbase, ::tensorflow::Input sign_decay, |
749 | ::tensorflow::Input beta, ::tensorflow::Input grad, const |
750 | ApplyPowerSign::Attrs& attrs); |
751 | operator ::tensorflow::Output() const { return out; } |
752 | operator ::tensorflow::Input() const { return out; } |
753 | ::tensorflow::Node* node() const { return out.node(); } |
754 | |
755 | static Attrs UseLocking(bool x) { |
756 | return Attrs().UseLocking(x); |
757 | } |
758 | |
759 | Operation operation; |
760 | ::tensorflow::Output out; |
761 | }; |
762 | |
763 | /// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. |
764 | /// |
765 | /// accum += grad * grad |
766 | /// prox_v = var - lr * grad * (1 / sqrt(accum)) |
767 | /// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} |
768 | /// |
769 | /// Args: |
770 | /// * scope: A Scope object |
771 | /// * var: Should be from a Variable(). |
772 | /// * accum: Should be from a Variable(). |
773 | /// * lr: Scaling factor. Must be a scalar. |
774 | /// * l1: L1 regularization. Must be a scalar. |
775 | /// * l2: L2 regularization. Must be a scalar. |
776 | /// * grad: The gradient. |
777 | /// |
778 | /// Optional attributes (see `Attrs`): |
779 | /// * use_locking: If True, updating of the var and accum tensors will be protected by |
780 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
781 | /// |
782 | /// Returns: |
783 | /// * `Output`: Same as "var". |
784 | class ApplyProximalAdagrad { |
785 | public: |
786 | /// Optional attribute setters for ApplyProximalAdagrad |
787 | struct Attrs { |
788 | /// If True, updating of the var and accum tensors will be protected by |
789 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
790 | /// |
791 | /// Defaults to false |
792 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
793 | Attrs ret = *this; |
794 | ret.use_locking_ = x; |
795 | return ret; |
796 | } |
797 | |
798 | bool use_locking_ = false; |
799 | }; |
800 | ApplyProximalAdagrad(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
801 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
802 | ::tensorflow::Input l1, ::tensorflow::Input l2, |
803 | ::tensorflow::Input grad); |
804 | ApplyProximalAdagrad(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
805 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
806 | ::tensorflow::Input l1, ::tensorflow::Input l2, |
807 | ::tensorflow::Input grad, const |
808 | ApplyProximalAdagrad::Attrs& attrs); |
809 | operator ::tensorflow::Output() const { return out; } |
810 | operator ::tensorflow::Input() const { return out; } |
811 | ::tensorflow::Node* node() const { return out.node(); } |
812 | |
813 | static Attrs UseLocking(bool x) { |
814 | return Attrs().UseLocking(x); |
815 | } |
816 | |
817 | Operation operation; |
818 | ::tensorflow::Output out; |
819 | }; |
820 | |
821 | /// Update '*var' as FOBOS algorithm with fixed learning rate. |
822 | /// |
823 | /// prox_v = var - alpha * delta |
824 | /// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} |
825 | /// |
826 | /// Args: |
827 | /// * scope: A Scope object |
828 | /// * var: Should be from a Variable(). |
829 | /// * alpha: Scaling factor. Must be a scalar. |
830 | /// * l1: L1 regularization. Must be a scalar. |
831 | /// * l2: L2 regularization. Must be a scalar. |
832 | /// * delta: The change. |
833 | /// |
834 | /// Optional attributes (see `Attrs`): |
835 | /// * use_locking: If True, the subtraction will be protected by a lock; |
836 | /// otherwise the behavior is undefined, but may exhibit less contention. |
837 | /// |
838 | /// Returns: |
839 | /// * `Output`: Same as "var". |
840 | class ApplyProximalGradientDescent { |
841 | public: |
842 | /// Optional attribute setters for ApplyProximalGradientDescent |
843 | struct Attrs { |
844 | /// If True, the subtraction will be protected by a lock; |
845 | /// otherwise the behavior is undefined, but may exhibit less contention. |
846 | /// |
847 | /// Defaults to false |
848 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
849 | Attrs ret = *this; |
850 | ret.use_locking_ = x; |
851 | return ret; |
852 | } |
853 | |
854 | bool use_locking_ = false; |
855 | }; |
856 | ApplyProximalGradientDescent(const ::tensorflow::Scope& scope, |
857 | ::tensorflow::Input var, ::tensorflow::Input |
858 | alpha, ::tensorflow::Input l1, ::tensorflow::Input |
859 | l2, ::tensorflow::Input delta); |
860 | ApplyProximalGradientDescent(const ::tensorflow::Scope& scope, |
861 | ::tensorflow::Input var, ::tensorflow::Input |
862 | alpha, ::tensorflow::Input l1, ::tensorflow::Input |
863 | l2, ::tensorflow::Input delta, const |
864 | ApplyProximalGradientDescent::Attrs& attrs); |
865 | operator ::tensorflow::Output() const { return out; } |
866 | operator ::tensorflow::Input() const { return out; } |
867 | ::tensorflow::Node* node() const { return out.node(); } |
868 | |
869 | static Attrs UseLocking(bool x) { |
870 | return Attrs().UseLocking(x); |
871 | } |
872 | |
873 | Operation operation; |
874 | ::tensorflow::Output out; |
875 | }; |
876 | |
877 | /// Update '*var' according to the RMSProp algorithm. |
878 | /// |
/// Note that in the dense implementation of this algorithm, ms and mom will
/// update even if the grad is zero, but in the sparse implementation, ms
/// and mom will not update in iterations during which the grad is zero.
882 | /// |
883 | /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 |
884 | /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) |
885 | /// |
886 | /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad |
887 | /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) |
888 | /// var <- var - mom |
889 | /// |
890 | /// Args: |
891 | /// * scope: A Scope object |
892 | /// * var: Should be from a Variable(). |
893 | /// * ms: Should be from a Variable(). |
894 | /// * mom: Should be from a Variable(). |
895 | /// * lr: Scaling factor. Must be a scalar. |
896 | /// * rho: Decay rate. Must be a scalar. |
/// * momentum: Momentum scale. Must be a scalar.
/// * epsilon: Ridge term. Must be a scalar.
898 | /// * grad: The gradient. |
899 | /// |
900 | /// Optional attributes (see `Attrs`): |
901 | /// * use_locking: If `True`, updating of the var, ms, and mom tensors is protected |
902 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
903 | /// contention. |
904 | /// |
905 | /// Returns: |
906 | /// * `Output`: Same as "var". |
907 | class ApplyRMSProp { |
908 | public: |
909 | /// Optional attribute setters for ApplyRMSProp |
910 | struct Attrs { |
911 | /// If `True`, updating of the var, ms, and mom tensors is protected |
912 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
913 | /// contention. |
914 | /// |
915 | /// Defaults to false |
916 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
917 | Attrs ret = *this; |
918 | ret.use_locking_ = x; |
919 | return ret; |
920 | } |
921 | |
922 | bool use_locking_ = false; |
923 | }; |
924 | ApplyRMSProp(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
925 | ::tensorflow::Input ms, ::tensorflow::Input mom, |
926 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
927 | ::tensorflow::Input momentum, ::tensorflow::Input epsilon, |
928 | ::tensorflow::Input grad); |
929 | ApplyRMSProp(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
930 | ::tensorflow::Input ms, ::tensorflow::Input mom, |
931 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
932 | ::tensorflow::Input momentum, ::tensorflow::Input epsilon, |
933 | ::tensorflow::Input grad, const ApplyRMSProp::Attrs& attrs); |
934 | operator ::tensorflow::Output() const { return out; } |
935 | operator ::tensorflow::Input() const { return out; } |
936 | ::tensorflow::Node* node() const { return out.node(); } |
937 | |
938 | static Attrs UseLocking(bool x) { |
939 | return Attrs().UseLocking(x); |
940 | } |
941 | |
942 | Operation operation; |
943 | ::tensorflow::Output out; |
944 | }; |
945 | |
946 | /// Update '*var' according to the adadelta scheme. |
947 | /// |
/// accum = rho * accum + (1 - rho) * grad.square();
/// update = (accum_update + epsilon).sqrt() * (accum + epsilon).rsqrt() * grad;
/// accum_update = rho * accum_update + (1 - rho) * update.square();
951 | /// var -= update; |
952 | /// |
953 | /// Args: |
954 | /// * scope: A Scope object |
955 | /// * var: Should be from a Variable(). |
956 | /// * accum: Should be from a Variable(). |
957 | /// * accum_update: Should be from a Variable(). |
958 | /// * lr: Scaling factor. Must be a scalar. |
959 | /// * rho: Decay factor. Must be a scalar. |
960 | /// * epsilon: Constant factor. Must be a scalar. |
961 | /// * grad: The gradient. |
962 | /// |
963 | /// Optional attributes (see `Attrs`): |
/// * use_locking: If True, updating of the var, accum and accum_update tensors will be protected by
965 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
966 | /// |
967 | /// Returns: |
968 | /// * the created `Operation` |
969 | class ResourceApplyAdadelta { |
970 | public: |
971 | /// Optional attribute setters for ResourceApplyAdadelta |
972 | struct Attrs { |
    /// If True, updating of the var, accum and accum_update tensors will be protected by
974 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
975 | /// |
976 | /// Defaults to false |
977 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
978 | Attrs ret = *this; |
979 | ret.use_locking_ = x; |
980 | return ret; |
981 | } |
982 | |
983 | bool use_locking_ = false; |
984 | }; |
985 | ResourceApplyAdadelta(const ::tensorflow::Scope& scope, ::tensorflow::Input |
986 | var, ::tensorflow::Input accum, ::tensorflow::Input |
987 | accum_update, ::tensorflow::Input lr, ::tensorflow::Input |
988 | rho, ::tensorflow::Input epsilon, ::tensorflow::Input |
989 | grad); |
990 | ResourceApplyAdadelta(const ::tensorflow::Scope& scope, ::tensorflow::Input |
991 | var, ::tensorflow::Input accum, ::tensorflow::Input |
992 | accum_update, ::tensorflow::Input lr, ::tensorflow::Input |
993 | rho, ::tensorflow::Input epsilon, ::tensorflow::Input |
994 | grad, const ResourceApplyAdadelta::Attrs& attrs); |
995 | operator ::tensorflow::Operation() const { return operation; } |
996 | |
997 | static Attrs UseLocking(bool x) { |
998 | return Attrs().UseLocking(x); |
999 | } |
1000 | |
1001 | Operation operation; |
1002 | }; |
1003 | |
1004 | /// Update '*var' according to the adagrad scheme. |
1005 | /// |
1006 | /// accum += grad * grad |
1007 | /// var -= lr * grad * (1 / sqrt(accum)) |
1008 | /// |
1009 | /// Args: |
1010 | /// * scope: A Scope object |
1011 | /// * var: Should be from a Variable(). |
1012 | /// * accum: Should be from a Variable(). |
1013 | /// * lr: Scaling factor. Must be a scalar. |
1014 | /// * grad: The gradient. |
1015 | /// |
1016 | /// Optional attributes (see `Attrs`): |
1017 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
1018 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1019 | /// contention. |
1020 | /// |
1021 | /// Returns: |
1022 | /// * the created `Operation` |
1023 | class ResourceApplyAdagrad { |
1024 | public: |
1025 | /// Optional attribute setters for ResourceApplyAdagrad |
1026 | struct Attrs { |
1027 | /// If `True`, updating of the var and accum tensors will be protected |
1028 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1029 | /// contention. |
1030 | /// |
1031 | /// Defaults to false |
1032 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1033 | Attrs ret = *this; |
1034 | ret.use_locking_ = x; |
1035 | return ret; |
1036 | } |
1037 | |
1038 | /// Defaults to true |
1039 | TF_MUST_USE_RESULT Attrs UpdateSlots(bool x) { |
1040 | Attrs ret = *this; |
1041 | ret.update_slots_ = x; |
1042 | return ret; |
1043 | } |
1044 | |
1045 | bool use_locking_ = false; |
1046 | bool update_slots_ = true; |
1047 | }; |
1048 | ResourceApplyAdagrad(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1049 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
1050 | ::tensorflow::Input grad); |
1051 | ResourceApplyAdagrad(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1052 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
1053 | ::tensorflow::Input grad, const |
1054 | ResourceApplyAdagrad::Attrs& attrs); |
1055 | operator ::tensorflow::Operation() const { return operation; } |
1056 | |
1057 | static Attrs UseLocking(bool x) { |
1058 | return Attrs().UseLocking(x); |
1059 | } |
1060 | static Attrs UpdateSlots(bool x) { |
1061 | return Attrs().UpdateSlots(x); |
1062 | } |
1063 | |
1064 | Operation operation; |
1065 | }; |
1066 | |
1067 | /// Update '*var' according to the proximal adagrad scheme. |
1068 | /// |
1069 | /// Args: |
1070 | /// * scope: A Scope object |
1071 | /// * var: Should be from a Variable(). |
1072 | /// * gradient_accumulator: Should be from a Variable(). |
1073 | /// * gradient_squared_accumulator: Should be from a Variable(). |
1074 | /// * grad: The gradient. |
1075 | /// * lr: Scaling factor. Must be a scalar. |
1076 | /// * l1: L1 regularization. Must be a scalar. |
1077 | /// * l2: L2 regularization. Must be a scalar. |
1078 | /// * global_step: Training step number. Must be a scalar. |
1079 | /// |
1080 | /// Optional attributes (see `Attrs`): |
1081 | /// * use_locking: If True, updating of the var and accum tensors will be protected by |
1082 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
1083 | /// |
1084 | /// Returns: |
1085 | /// * the created `Operation` |
1086 | class ResourceApplyAdagradDA { |
1087 | public: |
1088 | /// Optional attribute setters for ResourceApplyAdagradDA |
1089 | struct Attrs { |
1090 | /// If True, updating of the var and accum tensors will be protected by |
1091 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
1092 | /// |
1093 | /// Defaults to false |
1094 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1095 | Attrs ret = *this; |
1096 | ret.use_locking_ = x; |
1097 | return ret; |
1098 | } |
1099 | |
1100 | bool use_locking_ = false; |
1101 | }; |
1102 | ResourceApplyAdagradDA(const ::tensorflow::Scope& scope, ::tensorflow::Input |
1103 | var, ::tensorflow::Input gradient_accumulator, |
1104 | ::tensorflow::Input gradient_squared_accumulator, |
1105 | ::tensorflow::Input grad, ::tensorflow::Input lr, |
1106 | ::tensorflow::Input l1, ::tensorflow::Input l2, |
1107 | ::tensorflow::Input global_step); |
1108 | ResourceApplyAdagradDA(const ::tensorflow::Scope& scope, ::tensorflow::Input |
1109 | var, ::tensorflow::Input gradient_accumulator, |
1110 | ::tensorflow::Input gradient_squared_accumulator, |
1111 | ::tensorflow::Input grad, ::tensorflow::Input lr, |
1112 | ::tensorflow::Input l1, ::tensorflow::Input l2, |
1113 | ::tensorflow::Input global_step, const |
1114 | ResourceApplyAdagradDA::Attrs& attrs); |
1115 | operator ::tensorflow::Operation() const { return operation; } |
1116 | |
1117 | static Attrs UseLocking(bool x) { |
1118 | return Attrs().UseLocking(x); |
1119 | } |
1120 | |
1121 | Operation operation; |
1122 | }; |
1123 | |
1124 | /// Update '*var' according to the Adam algorithm. |
1125 | /// |
1126 | /// $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ |
1127 | /// $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ |
1128 | /// $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ |
1129 | /// $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ |
1130 | /// |
1131 | /// Args: |
1132 | /// * scope: A Scope object |
1133 | /// * var: Should be from a Variable(). |
1134 | /// * m: Should be from a Variable(). |
1135 | /// * v: Should be from a Variable(). |
1136 | /// * beta1_power: Must be a scalar. |
1137 | /// * beta2_power: Must be a scalar. |
1138 | /// * lr: Scaling factor. Must be a scalar. |
1139 | /// * beta1: Momentum factor. Must be a scalar. |
1140 | /// * beta2: Momentum factor. Must be a scalar. |
1141 | /// * epsilon: Ridge term. Must be a scalar. |
1142 | /// * grad: The gradient. |
1143 | /// |
1144 | /// Optional attributes (see `Attrs`): |
1145 | /// * use_locking: If `True`, updating of the var, m, and v tensors will be protected |
1146 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1147 | /// contention. |
1148 | /// * use_nesterov: If `True`, uses the nesterov update. |
1149 | /// |
1150 | /// Returns: |
1151 | /// * the created `Operation` |
1152 | class ResourceApplyAdam { |
1153 | public: |
1154 | /// Optional attribute setters for ResourceApplyAdam |
1155 | struct Attrs { |
1156 | /// If `True`, updating of the var, m, and v tensors will be protected |
1157 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1158 | /// contention. |
1159 | /// |
1160 | /// Defaults to false |
1161 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1162 | Attrs ret = *this; |
1163 | ret.use_locking_ = x; |
1164 | return ret; |
1165 | } |
1166 | |
1167 | /// If `True`, uses the nesterov update. |
1168 | /// |
1169 | /// Defaults to false |
1170 | TF_MUST_USE_RESULT Attrs UseNesterov(bool x) { |
1171 | Attrs ret = *this; |
1172 | ret.use_nesterov_ = x; |
1173 | return ret; |
1174 | } |
1175 | |
1176 | bool use_locking_ = false; |
1177 | bool use_nesterov_ = false; |
1178 | }; |
1179 | ResourceApplyAdam(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1180 | ::tensorflow::Input m, ::tensorflow::Input v, |
1181 | ::tensorflow::Input beta1_power, ::tensorflow::Input |
1182 | beta2_power, ::tensorflow::Input lr, ::tensorflow::Input |
1183 | beta1, ::tensorflow::Input beta2, ::tensorflow::Input |
1184 | epsilon, ::tensorflow::Input grad); |
1185 | ResourceApplyAdam(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1186 | ::tensorflow::Input m, ::tensorflow::Input v, |
1187 | ::tensorflow::Input beta1_power, ::tensorflow::Input |
1188 | beta2_power, ::tensorflow::Input lr, ::tensorflow::Input |
1189 | beta1, ::tensorflow::Input beta2, ::tensorflow::Input |
1190 | epsilon, ::tensorflow::Input grad, const |
1191 | ResourceApplyAdam::Attrs& attrs); |
1192 | operator ::tensorflow::Operation() const { return operation; } |
1193 | |
1194 | static Attrs UseLocking(bool x) { |
1195 | return Attrs().UseLocking(x); |
1196 | } |
1197 | static Attrs UseNesterov(bool x) { |
1198 | return Attrs().UseNesterov(x); |
1199 | } |
1200 | |
1201 | Operation operation; |
1202 | }; |
1203 | |
1204 | /// Update '*var' according to the Adam algorithm. |
1205 | /// |
/// $$\text{lr}_t := \mathrm{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
1207 | /// $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ |
1208 | /// $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ |
/// $$\hat{v}_t := \max\{\hat{v}_{t-1}, v_t\}$$
1210 | /// $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ |
1211 | /// |
1212 | /// Args: |
1213 | /// * scope: A Scope object |
1214 | /// * var: Should be from a Variable(). |
1215 | /// * m: Should be from a Variable(). |
1216 | /// * v: Should be from a Variable(). |
1217 | /// * vhat: Should be from a Variable(). |
1218 | /// * beta1_power: Must be a scalar. |
1219 | /// * beta2_power: Must be a scalar. |
1220 | /// * lr: Scaling factor. Must be a scalar. |
1221 | /// * beta1: Momentum factor. Must be a scalar. |
1222 | /// * beta2: Momentum factor. Must be a scalar. |
1223 | /// * epsilon: Ridge term. Must be a scalar. |
1224 | /// * grad: The gradient. |
1225 | /// |
1226 | /// Optional attributes (see `Attrs`): |
1227 | /// * use_locking: If `True`, updating of the var, m, and v tensors will be protected |
1228 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1229 | /// contention. |
1230 | /// |
1231 | /// Returns: |
1232 | /// * the created `Operation` |
1233 | class ResourceApplyAdamWithAmsgrad { |
1234 | public: |
1235 | /// Optional attribute setters for ResourceApplyAdamWithAmsgrad |
1236 | struct Attrs { |
1237 | /// If `True`, updating of the var, m, and v tensors will be protected |
1238 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1239 | /// contention. |
1240 | /// |
1241 | /// Defaults to false |
1242 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1243 | Attrs ret = *this; |
1244 | ret.use_locking_ = x; |
1245 | return ret; |
1246 | } |
1247 | |
1248 | bool use_locking_ = false; |
1249 | }; |
1250 | ResourceApplyAdamWithAmsgrad(const ::tensorflow::Scope& scope, |
1251 | ::tensorflow::Input var, ::tensorflow::Input m, |
1252 | ::tensorflow::Input v, ::tensorflow::Input vhat, |
1253 | ::tensorflow::Input beta1_power, |
1254 | ::tensorflow::Input beta2_power, |
1255 | ::tensorflow::Input lr, ::tensorflow::Input beta1, |
1256 | ::tensorflow::Input beta2, ::tensorflow::Input |
1257 | epsilon, ::tensorflow::Input grad); |
1258 | ResourceApplyAdamWithAmsgrad(const ::tensorflow::Scope& scope, |
1259 | ::tensorflow::Input var, ::tensorflow::Input m, |
1260 | ::tensorflow::Input v, ::tensorflow::Input vhat, |
1261 | ::tensorflow::Input beta1_power, |
1262 | ::tensorflow::Input beta2_power, |
1263 | ::tensorflow::Input lr, ::tensorflow::Input beta1, |
1264 | ::tensorflow::Input beta2, ::tensorflow::Input |
1265 | epsilon, ::tensorflow::Input grad, const |
1266 | ResourceApplyAdamWithAmsgrad::Attrs& attrs); |
1267 | operator ::tensorflow::Operation() const { return operation; } |
1268 | |
1269 | static Attrs UseLocking(bool x) { |
1270 | return Attrs().UseLocking(x); |
1271 | } |
1272 | |
1273 | Operation operation; |
1274 | }; |
1275 | |
1276 | /// Update '*var' according to the AddSign update. |
1277 | /// |
1278 | /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g |
/// update <- (alpha + sign_decay * sign(g) * sign(m)) * g
1280 | /// variable <- variable - lr_t * update |
1281 | /// |
1282 | /// Args: |
1283 | /// * scope: A Scope object |
1284 | /// * var: Should be from a Variable(). |
1285 | /// * m: Should be from a Variable(). |
1286 | /// * lr: Scaling factor. Must be a scalar. |
1287 | /// * alpha: Must be a scalar. |
1288 | /// * sign_decay: Must be a scalar. |
1289 | /// * beta: Must be a scalar. |
1290 | /// * grad: The gradient. |
1291 | /// |
1292 | /// Optional attributes (see `Attrs`): |
1293 | /// * use_locking: If `True`, updating of the var and m tensors is |
1294 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
1295 | /// contention. |
1296 | /// |
1297 | /// Returns: |
1298 | /// * the created `Operation` |
1299 | class ResourceApplyAddSign { |
1300 | public: |
1301 | /// Optional attribute setters for ResourceApplyAddSign |
1302 | struct Attrs { |
1303 | /// If `True`, updating of the var and m tensors is |
1304 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
1305 | /// contention. |
1306 | /// |
1307 | /// Defaults to false |
1308 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1309 | Attrs ret = *this; |
1310 | ret.use_locking_ = x; |
1311 | return ret; |
1312 | } |
1313 | |
1314 | bool use_locking_ = false; |
1315 | }; |
1316 | ResourceApplyAddSign(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1317 | ::tensorflow::Input m, ::tensorflow::Input lr, |
1318 | ::tensorflow::Input alpha, ::tensorflow::Input sign_decay, |
1319 | ::tensorflow::Input beta, ::tensorflow::Input grad); |
1320 | ResourceApplyAddSign(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1321 | ::tensorflow::Input m, ::tensorflow::Input lr, |
1322 | ::tensorflow::Input alpha, ::tensorflow::Input sign_decay, |
1323 | ::tensorflow::Input beta, ::tensorflow::Input grad, const |
1324 | ResourceApplyAddSign::Attrs& attrs); |
1325 | operator ::tensorflow::Operation() const { return operation; } |
1326 | |
1327 | static Attrs UseLocking(bool x) { |
1328 | return Attrs().UseLocking(x); |
1329 | } |
1330 | |
1331 | Operation operation; |
1332 | }; |
1333 | |
1334 | /// Update '*var' according to the centered RMSProp algorithm. |
1335 | /// |
1336 | /// The centered RMSProp algorithm uses an estimate of the centered second moment |
1337 | /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which |
1338 | /// uses the (uncentered) second moment. This often helps with training, but is |
1339 | /// slightly more expensive in terms of computation and memory. |
1340 | /// |
/// Note that in the dense implementation of this algorithm, mg, ms, and mom
/// will update even if the grad is zero, but in the sparse implementation, mg,
/// ms, and mom will not update in iterations during which the grad is zero.
1344 | /// |
1345 | /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 |
1346 | /// mean_grad = decay * mean_grad + (1-decay) * gradient |
1347 | /// |
1348 | /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) |
1349 | /// |
1350 | /// mg <- rho * mg_{t-1} + (1-rho) * grad |
1351 | /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad |
1352 | /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) |
1353 | /// var <- var - mom |
1354 | /// |
1355 | /// Args: |
1356 | /// * scope: A Scope object |
1357 | /// * var: Should be from a Variable(). |
1358 | /// * mg: Should be from a Variable(). |
1359 | /// * ms: Should be from a Variable(). |
1360 | /// * mom: Should be from a Variable(). |
1361 | /// * lr: Scaling factor. Must be a scalar. |
1362 | /// * rho: Decay rate. Must be a scalar. |
1363 | /// * momentum: Momentum Scale. Must be a scalar. |
1364 | /// * epsilon: Ridge term. Must be a scalar. |
1365 | /// * grad: The gradient. |
1366 | /// |
1367 | /// Optional attributes (see `Attrs`): |
1368 | /// * use_locking: If `True`, updating of the var, mg, ms, and mom tensors is |
1369 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
1370 | /// contention. |
1371 | /// |
1372 | /// Returns: |
1373 | /// * the created `Operation` |
1374 | class ResourceApplyCenteredRMSProp { |
1375 | public: |
1376 | /// Optional attribute setters for ResourceApplyCenteredRMSProp |
1377 | struct Attrs { |
1378 | /// If `True`, updating of the var, mg, ms, and mom tensors is |
1379 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
1380 | /// contention. |
1381 | /// |
1382 | /// Defaults to false |
1383 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1384 | Attrs ret = *this; |
1385 | ret.use_locking_ = x; |
1386 | return ret; |
1387 | } |
1388 | |
1389 | bool use_locking_ = false; |
1390 | }; |
1391 | ResourceApplyCenteredRMSProp(const ::tensorflow::Scope& scope, |
1392 | ::tensorflow::Input var, ::tensorflow::Input mg, |
1393 | ::tensorflow::Input ms, ::tensorflow::Input mom, |
1394 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
1395 | ::tensorflow::Input momentum, ::tensorflow::Input |
1396 | epsilon, ::tensorflow::Input grad); |
1397 | ResourceApplyCenteredRMSProp(const ::tensorflow::Scope& scope, |
1398 | ::tensorflow::Input var, ::tensorflow::Input mg, |
1399 | ::tensorflow::Input ms, ::tensorflow::Input mom, |
1400 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
1401 | ::tensorflow::Input momentum, ::tensorflow::Input |
1402 | epsilon, ::tensorflow::Input grad, const |
1403 | ResourceApplyCenteredRMSProp::Attrs& attrs); |
1404 | operator ::tensorflow::Operation() const { return operation; } |
1405 | |
1406 | static Attrs UseLocking(bool x) { |
1407 | return Attrs().UseLocking(x); |
1408 | } |
1409 | |
1410 | Operation operation; |
1411 | }; |
1412 | |
1413 | /// Update '*var' according to the Ftrl-proximal scheme. |
1414 | /// |
1415 | /// accum_new = accum + grad * grad |
1416 | /// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var |
1417 | /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 |
1418 | /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 |
1419 | /// accum = accum_new |
1420 | /// |
1421 | /// Args: |
1422 | /// * scope: A Scope object |
1423 | /// * var: Should be from a Variable(). |
1424 | /// * accum: Should be from a Variable(). |
1425 | /// * linear: Should be from a Variable(). |
1426 | /// * grad: The gradient. |
1427 | /// * lr: Scaling factor. Must be a scalar. |
1428 | /// * l1: L1 regularization. Must be a scalar. |
1429 | /// * l2: L2 regularization. Must be a scalar. |
1430 | /// * lr_power: Scaling factor. Must be a scalar. |
1431 | /// |
1432 | /// Optional attributes (see `Attrs`): |
1433 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
1434 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1435 | /// contention. |
1436 | /// |
1437 | /// Returns: |
1438 | /// * the created `Operation` |
1439 | class ResourceApplyFtrl { |
1440 | public: |
1441 | /// Optional attribute setters for ResourceApplyFtrl |
1442 | struct Attrs { |
1443 | /// If `True`, updating of the var and accum tensors will be protected |
1444 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1445 | /// contention. |
1446 | /// |
1447 | /// Defaults to false |
1448 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1449 | Attrs ret = *this; |
1450 | ret.use_locking_ = x; |
1451 | return ret; |
1452 | } |
1453 | |
1454 | /// Defaults to false |
1455 | TF_MUST_USE_RESULT Attrs MultiplyLinearByLr(bool x) { |
1456 | Attrs ret = *this; |
1457 | ret.multiply_linear_by_lr_ = x; |
1458 | return ret; |
1459 | } |
1460 | |
1461 | bool use_locking_ = false; |
1462 | bool multiply_linear_by_lr_ = false; |
1463 | }; |
1464 | ResourceApplyFtrl(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1465 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
1466 | ::tensorflow::Input grad, ::tensorflow::Input lr, |
1467 | ::tensorflow::Input l1, ::tensorflow::Input l2, |
1468 | ::tensorflow::Input lr_power); |
1469 | ResourceApplyFtrl(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1470 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
1471 | ::tensorflow::Input grad, ::tensorflow::Input lr, |
1472 | ::tensorflow::Input l1, ::tensorflow::Input l2, |
1473 | ::tensorflow::Input lr_power, const ResourceApplyFtrl::Attrs& |
1474 | attrs); |
1475 | operator ::tensorflow::Operation() const { return operation; } |
1476 | |
1477 | static Attrs UseLocking(bool x) { |
1478 | return Attrs().UseLocking(x); |
1479 | } |
1480 | static Attrs MultiplyLinearByLr(bool x) { |
1481 | return Attrs().MultiplyLinearByLr(x); |
1482 | } |
1483 | |
1484 | Operation operation; |
1485 | }; |
1486 | |
1487 | /// Update '*var' according to the Ftrl-proximal scheme. |
1488 | /// |
1489 | /// accum_new = accum + grad * grad |
1490 | /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var |
1491 | /// linear += grad_with_shrinkage + |
1492 | /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var |
1493 | /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 |
1494 | /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 |
1495 | /// accum = accum_new |
1496 | /// |
1497 | /// Args: |
1498 | /// * scope: A Scope object |
1499 | /// * var: Should be from a Variable(). |
1500 | /// * accum: Should be from a Variable(). |
1501 | /// * linear: Should be from a Variable(). |
1502 | /// * grad: The gradient. |
1503 | /// * lr: Scaling factor. Must be a scalar. |
1504 | /// * l1: L1 regularization. Must be a scalar. |
1505 | /// * l2: L2 shrinkage regularization. Must be a scalar. |
1506 | /// * lr_power: Scaling factor. Must be a scalar. |
1507 | /// |
1508 | /// Optional attributes (see `Attrs`): |
1509 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
1510 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1511 | /// contention. |
1512 | /// |
1513 | /// Returns: |
1514 | /// * the created `Operation` |
1515 | class ResourceApplyFtrlV2 { |
1516 | public: |
1517 | /// Optional attribute setters for ResourceApplyFtrlV2 |
1518 | struct Attrs { |
1519 | /// If `True`, updating of the var and accum tensors will be protected |
1520 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1521 | /// contention. |
1522 | /// |
1523 | /// Defaults to false |
1524 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1525 | Attrs ret = *this; |
1526 | ret.use_locking_ = x; |
1527 | return ret; |
1528 | } |
1529 | |
1530 | /// Defaults to false |
1531 | TF_MUST_USE_RESULT Attrs MultiplyLinearByLr(bool x) { |
1532 | Attrs ret = *this; |
1533 | ret.multiply_linear_by_lr_ = x; |
1534 | return ret; |
1535 | } |
1536 | |
1537 | bool use_locking_ = false; |
1538 | bool multiply_linear_by_lr_ = false; |
1539 | }; |
1540 | ResourceApplyFtrlV2(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1541 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
1542 | ::tensorflow::Input grad, ::tensorflow::Input lr, |
1543 | ::tensorflow::Input l1, ::tensorflow::Input l2, |
1544 | ::tensorflow::Input l2_shrinkage, ::tensorflow::Input |
1545 | lr_power); |
1546 | ResourceApplyFtrlV2(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1547 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
1548 | ::tensorflow::Input grad, ::tensorflow::Input lr, |
1549 | ::tensorflow::Input l1, ::tensorflow::Input l2, |
1550 | ::tensorflow::Input l2_shrinkage, ::tensorflow::Input |
1551 | lr_power, const ResourceApplyFtrlV2::Attrs& attrs); |
1552 | operator ::tensorflow::Operation() const { return operation; } |
1553 | |
1554 | static Attrs UseLocking(bool x) { |
1555 | return Attrs().UseLocking(x); |
1556 | } |
1557 | static Attrs MultiplyLinearByLr(bool x) { |
1558 | return Attrs().MultiplyLinearByLr(x); |
1559 | } |
1560 | |
1561 | Operation operation; |
1562 | }; |
1563 | |
1564 | /// Update '*var' by subtracting 'alpha' * 'delta' from it. |
1565 | /// |
1566 | /// Args: |
1567 | /// * scope: A Scope object |
1568 | /// * var: Should be from a Variable(). |
1569 | /// * alpha: Scaling factor. Must be a scalar. |
1570 | /// * delta: The change. |
1571 | /// |
1572 | /// Optional attributes (see `Attrs`): |
1573 | /// * use_locking: If `True`, the subtraction will be protected by a lock; |
1574 | /// otherwise the behavior is undefined, but may exhibit less contention. |
1575 | /// |
1576 | /// Returns: |
1577 | /// * the created `Operation` |
1578 | class ResourceApplyGradientDescent { |
1579 | public: |
1580 | /// Optional attribute setters for ResourceApplyGradientDescent |
1581 | struct Attrs { |
1582 | /// If `True`, the subtraction will be protected by a lock; |
1583 | /// otherwise the behavior is undefined, but may exhibit less contention. |
1584 | /// |
1585 | /// Defaults to false |
1586 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1587 | Attrs ret = *this; |
1588 | ret.use_locking_ = x; |
1589 | return ret; |
1590 | } |
1591 | |
1592 | bool use_locking_ = false; |
1593 | }; |
1594 | ResourceApplyGradientDescent(const ::tensorflow::Scope& scope, |
1595 | ::tensorflow::Input var, ::tensorflow::Input |
1596 | alpha, ::tensorflow::Input delta); |
1597 | ResourceApplyGradientDescent(const ::tensorflow::Scope& scope, |
1598 | ::tensorflow::Input var, ::tensorflow::Input |
1599 | alpha, ::tensorflow::Input delta, const |
1600 | ResourceApplyGradientDescent::Attrs& attrs); |
1601 | operator ::tensorflow::Operation() const { return operation; } |
1602 | |
1603 | static Attrs UseLocking(bool x) { |
1604 | return Attrs().UseLocking(x); |
1605 | } |
1606 | |
1607 | Operation operation; |
1608 | }; |
1609 | |
1610 | /// Update '*var' according to the momentum scheme. |
1611 | /// |
1612 | /// Set use_nesterov = True if you want to use Nesterov momentum. |
1613 | /// |
1614 | /// accum = accum * momentum - lr * grad |
1615 | /// var += accum |
1616 | /// |
1617 | /// Args: |
1618 | /// * scope: A Scope object |
1619 | /// * var: Should be from a Variable(). |
1620 | /// * accum: Should be from a Variable(). |
1621 | /// * lr: Scaling factor. Must be a scalar. |
1622 | /// * grad: The gradient. |
1623 | /// * momentum: Momentum. Must be a scalar. |
1624 | /// |
1625 | /// Optional attributes (see `Attrs`): |
1626 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
1627 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1628 | /// contention. |
1629 | /// * use_nesterov: If `True`, the tensor passed to compute grad will be |
1630 | /// var + momentum * accum, so in the end, the var you get is actually |
1631 | /// var + momentum * accum. |
1632 | /// |
1633 | /// Returns: |
1634 | /// * the created `Operation` |
1635 | class ResourceApplyKerasMomentum { |
1636 | public: |
1637 | /// Optional attribute setters for ResourceApplyKerasMomentum |
1638 | struct Attrs { |
1639 | /// If `True`, updating of the var and accum tensors will be protected |
1640 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1641 | /// contention. |
1642 | /// |
1643 | /// Defaults to false |
1644 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1645 | Attrs ret = *this; |
1646 | ret.use_locking_ = x; |
1647 | return ret; |
1648 | } |
1649 | |
1650 | /// If `True`, the tensor passed to compute grad will be |
1651 | /// var + momentum * accum, so in the end, the var you get is actually |
1652 | /// var + momentum * accum. |
1653 | /// |
1654 | /// Defaults to false |
1655 | TF_MUST_USE_RESULT Attrs UseNesterov(bool x) { |
1656 | Attrs ret = *this; |
1657 | ret.use_nesterov_ = x; |
1658 | return ret; |
1659 | } |
1660 | |
1661 | bool use_locking_ = false; |
1662 | bool use_nesterov_ = false; |
1663 | }; |
1664 | ResourceApplyKerasMomentum(const ::tensorflow::Scope& scope, |
1665 | ::tensorflow::Input var, ::tensorflow::Input accum, |
1666 | ::tensorflow::Input lr, ::tensorflow::Input grad, |
1667 | ::tensorflow::Input momentum); |
1668 | ResourceApplyKerasMomentum(const ::tensorflow::Scope& scope, |
1669 | ::tensorflow::Input var, ::tensorflow::Input accum, |
1670 | ::tensorflow::Input lr, ::tensorflow::Input grad, |
1671 | ::tensorflow::Input momentum, const |
1672 | ResourceApplyKerasMomentum::Attrs& attrs); |
1673 | operator ::tensorflow::Operation() const { return operation; } |
1674 | |
1675 | static Attrs UseLocking(bool x) { |
1676 | return Attrs().UseLocking(x); |
1677 | } |
1678 | static Attrs UseNesterov(bool x) { |
1679 | return Attrs().UseNesterov(x); |
1680 | } |
1681 | |
1682 | Operation operation; |
1683 | }; |
1684 | |
1685 | /// Update '*var' according to the momentum scheme. |
1686 | /// |
1687 | /// Set use_nesterov = True if you want to use Nesterov momentum. |
1688 | /// |
1689 | /// accum = accum * momentum + grad |
1690 | /// var -= lr * accum |
1691 | /// |
1692 | /// Args: |
1693 | /// * scope: A Scope object |
1694 | /// * var: Should be from a Variable(). |
1695 | /// * accum: Should be from a Variable(). |
1696 | /// * lr: Scaling factor. Must be a scalar. |
1697 | /// * grad: The gradient. |
1698 | /// * momentum: Momentum. Must be a scalar. |
1699 | /// |
1700 | /// Optional attributes (see `Attrs`): |
1701 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
1702 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1703 | /// contention. |
1704 | /// * use_nesterov: If `True`, the tensor passed to compute grad will be |
1705 | /// var - lr * momentum * accum, so in the end, the var you get is actually |
1706 | /// var - lr * momentum * accum. |
1707 | /// |
1708 | /// Returns: |
1709 | /// * the created `Operation` |
1710 | class ResourceApplyMomentum { |
1711 | public: |
1712 | /// Optional attribute setters for ResourceApplyMomentum |
1713 | struct Attrs { |
1714 | /// If `True`, updating of the var and accum tensors will be protected |
1715 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1716 | /// contention. |
1717 | /// |
1718 | /// Defaults to false |
1719 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1720 | Attrs ret = *this; |
1721 | ret.use_locking_ = x; |
1722 | return ret; |
1723 | } |
1724 | |
1725 | /// If `True`, the tensor passed to compute grad will be |
1726 | /// var - lr * momentum * accum, so in the end, the var you get is actually |
1727 | /// var - lr * momentum * accum. |
1728 | /// |
1729 | /// Defaults to false |
1730 | TF_MUST_USE_RESULT Attrs UseNesterov(bool x) { |
1731 | Attrs ret = *this; |
1732 | ret.use_nesterov_ = x; |
1733 | return ret; |
1734 | } |
1735 | |
1736 | bool use_locking_ = false; |
1737 | bool use_nesterov_ = false; |
1738 | }; |
1739 | ResourceApplyMomentum(const ::tensorflow::Scope& scope, ::tensorflow::Input |
1740 | var, ::tensorflow::Input accum, ::tensorflow::Input lr, |
1741 | ::tensorflow::Input grad, ::tensorflow::Input momentum); |
1742 | ResourceApplyMomentum(const ::tensorflow::Scope& scope, ::tensorflow::Input |
1743 | var, ::tensorflow::Input accum, ::tensorflow::Input lr, |
1744 | ::tensorflow::Input grad, ::tensorflow::Input momentum, |
1745 | const ResourceApplyMomentum::Attrs& attrs); |
1746 | operator ::tensorflow::Operation() const { return operation; } |
1747 | |
1748 | static Attrs UseLocking(bool x) { |
1749 | return Attrs().UseLocking(x); |
1750 | } |
1751 | static Attrs UseNesterov(bool x) { |
1752 | return Attrs().UseNesterov(x); |
1753 | } |
1754 | |
1755 | Operation operation; |
1756 | }; |
1757 | |
/// Update '*var' according to the PowerSign update.
1759 | /// |
1760 | /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g |
1761 | /// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g |
1762 | /// variable <- variable - lr_t * update |
1763 | /// |
1764 | /// Args: |
1765 | /// * scope: A Scope object |
1766 | /// * var: Should be from a Variable(). |
1767 | /// * m: Should be from a Variable(). |
1768 | /// * lr: Scaling factor. Must be a scalar. |
1769 | /// * logbase: Must be a scalar. |
1770 | /// * sign_decay: Must be a scalar. |
1771 | /// * beta: Must be a scalar. |
1772 | /// * grad: The gradient. |
1773 | /// |
1774 | /// Optional attributes (see `Attrs`): |
1775 | /// * use_locking: If `True`, updating of the var and m tensors is |
1776 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
1777 | /// contention. |
1778 | /// |
1779 | /// Returns: |
1780 | /// * the created `Operation` |
1781 | class ResourceApplyPowerSign { |
1782 | public: |
1783 | /// Optional attribute setters for ResourceApplyPowerSign |
1784 | struct Attrs { |
1785 | /// If `True`, updating of the var and m tensors is |
1786 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
1787 | /// contention. |
1788 | /// |
1789 | /// Defaults to false |
1790 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1791 | Attrs ret = *this; |
1792 | ret.use_locking_ = x; |
1793 | return ret; |
1794 | } |
1795 | |
1796 | bool use_locking_ = false; |
1797 | }; |
1798 | ResourceApplyPowerSign(const ::tensorflow::Scope& scope, ::tensorflow::Input |
1799 | var, ::tensorflow::Input m, ::tensorflow::Input lr, |
1800 | ::tensorflow::Input logbase, ::tensorflow::Input |
1801 | sign_decay, ::tensorflow::Input beta, |
1802 | ::tensorflow::Input grad); |
1803 | ResourceApplyPowerSign(const ::tensorflow::Scope& scope, ::tensorflow::Input |
1804 | var, ::tensorflow::Input m, ::tensorflow::Input lr, |
1805 | ::tensorflow::Input logbase, ::tensorflow::Input |
1806 | sign_decay, ::tensorflow::Input beta, |
1807 | ::tensorflow::Input grad, const |
1808 | ResourceApplyPowerSign::Attrs& attrs); |
1809 | operator ::tensorflow::Operation() const { return operation; } |
1810 | |
1811 | static Attrs UseLocking(bool x) { |
1812 | return Attrs().UseLocking(x); |
1813 | } |
1814 | |
1815 | Operation operation; |
1816 | }; |
1817 | |
1818 | /// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. |
1819 | /// |
1820 | /// accum += grad * grad |
1821 | /// prox_v = var - lr * grad * (1 / sqrt(accum)) |
1822 | /// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} |
1823 | /// |
1824 | /// Args: |
1825 | /// * scope: A Scope object |
1826 | /// * var: Should be from a Variable(). |
1827 | /// * accum: Should be from a Variable(). |
1828 | /// * lr: Scaling factor. Must be a scalar. |
1829 | /// * l1: L1 regularization. Must be a scalar. |
1830 | /// * l2: L2 regularization. Must be a scalar. |
1831 | /// * grad: The gradient. |
1832 | /// |
1833 | /// Optional attributes (see `Attrs`): |
1834 | /// * use_locking: If True, updating of the var and accum tensors will be protected by |
1835 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
1836 | /// |
1837 | /// Returns: |
1838 | /// * the created `Operation` |
1839 | class ResourceApplyProximalAdagrad { |
1840 | public: |
1841 | /// Optional attribute setters for ResourceApplyProximalAdagrad |
1842 | struct Attrs { |
1843 | /// If True, updating of the var and accum tensors will be protected by |
1844 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
1845 | /// |
1846 | /// Defaults to false |
1847 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1848 | Attrs ret = *this; |
1849 | ret.use_locking_ = x; |
1850 | return ret; |
1851 | } |
1852 | |
1853 | bool use_locking_ = false; |
1854 | }; |
1855 | ResourceApplyProximalAdagrad(const ::tensorflow::Scope& scope, |
1856 | ::tensorflow::Input var, ::tensorflow::Input |
1857 | accum, ::tensorflow::Input lr, ::tensorflow::Input |
1858 | l1, ::tensorflow::Input l2, ::tensorflow::Input |
1859 | grad); |
1860 | ResourceApplyProximalAdagrad(const ::tensorflow::Scope& scope, |
1861 | ::tensorflow::Input var, ::tensorflow::Input |
1862 | accum, ::tensorflow::Input lr, ::tensorflow::Input |
1863 | l1, ::tensorflow::Input l2, ::tensorflow::Input |
1864 | grad, const ResourceApplyProximalAdagrad::Attrs& |
1865 | attrs); |
1866 | operator ::tensorflow::Operation() const { return operation; } |
1867 | |
1868 | static Attrs UseLocking(bool x) { |
1869 | return Attrs().UseLocking(x); |
1870 | } |
1871 | |
1872 | Operation operation; |
1873 | }; |
1874 | |
1875 | /// Update '*var' as FOBOS algorithm with fixed learning rate. |
1876 | /// |
1877 | /// prox_v = var - alpha * delta |
1878 | /// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} |
1879 | /// |
1880 | /// Args: |
1881 | /// * scope: A Scope object |
1882 | /// * var: Should be from a Variable(). |
1883 | /// * alpha: Scaling factor. Must be a scalar. |
1884 | /// * l1: L1 regularization. Must be a scalar. |
1885 | /// * l2: L2 regularization. Must be a scalar. |
1886 | /// * delta: The change. |
1887 | /// |
1888 | /// Optional attributes (see `Attrs`): |
1889 | /// * use_locking: If True, the subtraction will be protected by a lock; |
1890 | /// otherwise the behavior is undefined, but may exhibit less contention. |
1891 | /// |
1892 | /// Returns: |
1893 | /// * the created `Operation` |
1894 | class ResourceApplyProximalGradientDescent { |
1895 | public: |
1896 | /// Optional attribute setters for ResourceApplyProximalGradientDescent |
1897 | struct Attrs { |
1898 | /// If True, the subtraction will be protected by a lock; |
1899 | /// otherwise the behavior is undefined, but may exhibit less contention. |
1900 | /// |
1901 | /// Defaults to false |
1902 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1903 | Attrs ret = *this; |
1904 | ret.use_locking_ = x; |
1905 | return ret; |
1906 | } |
1907 | |
1908 | bool use_locking_ = false; |
1909 | }; |
1910 | ResourceApplyProximalGradientDescent(const ::tensorflow::Scope& scope, |
1911 | ::tensorflow::Input var, |
1912 | ::tensorflow::Input alpha, |
1913 | ::tensorflow::Input l1, |
1914 | ::tensorflow::Input l2, |
1915 | ::tensorflow::Input delta); |
1916 | ResourceApplyProximalGradientDescent(const ::tensorflow::Scope& scope, |
1917 | ::tensorflow::Input var, |
1918 | ::tensorflow::Input alpha, |
1919 | ::tensorflow::Input l1, |
1920 | ::tensorflow::Input l2, |
1921 | ::tensorflow::Input delta, const |
1922 | ResourceApplyProximalGradientDescent::Attrs& |
1923 | attrs); |
1924 | operator ::tensorflow::Operation() const { return operation; } |
1925 | |
1926 | static Attrs UseLocking(bool x) { |
1927 | return Attrs().UseLocking(x); |
1928 | } |
1929 | |
1930 | Operation operation; |
1931 | }; |
1932 | |
1933 | /// Update '*var' according to the RMSProp algorithm. |
1934 | /// |
/// Note that this is the dense implementation of the algorithm: ms and mom
/// will update even if the grad is zero. In the sparse implementation, ms
/// and mom will not update in iterations during which the grad is zero.
1938 | /// |
1939 | /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 |
1940 | /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) |
1941 | /// |
1942 | /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad |
1943 | /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) |
1944 | /// var <- var - mom |
1945 | /// |
1946 | /// Args: |
1947 | /// * scope: A Scope object |
1948 | /// * var: Should be from a Variable(). |
1949 | /// * ms: Should be from a Variable(). |
1950 | /// * mom: Should be from a Variable(). |
1951 | /// * lr: Scaling factor. Must be a scalar. |
1952 | /// * rho: Decay rate. Must be a scalar. |
1953 | /// * epsilon: Ridge term. Must be a scalar. |
1954 | /// * grad: The gradient. |
1955 | /// |
1956 | /// Optional attributes (see `Attrs`): |
1957 | /// * use_locking: If `True`, updating of the var, ms, and mom tensors is protected |
1958 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1959 | /// contention. |
1960 | /// |
1961 | /// Returns: |
1962 | /// * the created `Operation` |
1963 | class ResourceApplyRMSProp { |
1964 | public: |
1965 | /// Optional attribute setters for ResourceApplyRMSProp |
1966 | struct Attrs { |
1967 | /// If `True`, updating of the var, ms, and mom tensors is protected |
1968 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
1969 | /// contention. |
1970 | /// |
1971 | /// Defaults to false |
1972 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
1973 | Attrs ret = *this; |
1974 | ret.use_locking_ = x; |
1975 | return ret; |
1976 | } |
1977 | |
1978 | bool use_locking_ = false; |
1979 | }; |
1980 | ResourceApplyRMSProp(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1981 | ::tensorflow::Input ms, ::tensorflow::Input mom, |
1982 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
1983 | ::tensorflow::Input momentum, ::tensorflow::Input epsilon, |
1984 | ::tensorflow::Input grad); |
1985 | ResourceApplyRMSProp(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
1986 | ::tensorflow::Input ms, ::tensorflow::Input mom, |
1987 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
1988 | ::tensorflow::Input momentum, ::tensorflow::Input epsilon, |
1989 | ::tensorflow::Input grad, const |
1990 | ResourceApplyRMSProp::Attrs& attrs); |
1991 | operator ::tensorflow::Operation() const { return operation; } |
1992 | |
1993 | static Attrs UseLocking(bool x) { |
1994 | return Attrs().UseLocking(x); |
1995 | } |
1996 | |
1997 | Operation operation; |
1998 | }; |
1999 | |
/// Update relevant entries in '*var' and '*accum' according to the adadelta scheme.
///
/// Args:
/// * scope: A Scope object
/// * var: Should be from a Variable().
/// * accum: Should be from a Variable().
/// * accum_update: Should be from a Variable().
2006 | /// * lr: Learning rate. Must be a scalar. |
2007 | /// * rho: Decay factor. Must be a scalar. |
2008 | /// * epsilon: Constant factor. Must be a scalar. |
2009 | /// * grad: The gradient. |
2010 | /// * indices: A vector of indices into the first dimension of var and accum. |
2011 | /// |
2012 | /// Optional attributes (see `Attrs`): |
2013 | /// * use_locking: If True, updating of the var and accum tensors will be protected by |
2014 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
2015 | /// |
2016 | /// Returns: |
2017 | /// * the created `Operation` |
2018 | class ResourceSparseApplyAdadelta { |
2019 | public: |
2020 | /// Optional attribute setters for ResourceSparseApplyAdadelta |
2021 | struct Attrs { |
2022 | /// If True, updating of the var and accum tensors will be protected by |
2023 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
2024 | /// |
2025 | /// Defaults to false |
2026 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2027 | Attrs ret = *this; |
2028 | ret.use_locking_ = x; |
2029 | return ret; |
2030 | } |
2031 | |
2032 | bool use_locking_ = false; |
2033 | }; |
2034 | ResourceSparseApplyAdadelta(const ::tensorflow::Scope& scope, |
2035 | ::tensorflow::Input var, ::tensorflow::Input accum, |
2036 | ::tensorflow::Input accum_update, |
2037 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
2038 | ::tensorflow::Input epsilon, ::tensorflow::Input |
2039 | grad, ::tensorflow::Input indices); |
2040 | ResourceSparseApplyAdadelta(const ::tensorflow::Scope& scope, |
2041 | ::tensorflow::Input var, ::tensorflow::Input accum, |
2042 | ::tensorflow::Input accum_update, |
2043 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
2044 | ::tensorflow::Input epsilon, ::tensorflow::Input |
2045 | grad, ::tensorflow::Input indices, const |
2046 | ResourceSparseApplyAdadelta::Attrs& attrs); |
2047 | operator ::tensorflow::Operation() const { return operation; } |
2048 | |
2049 | static Attrs UseLocking(bool x) { |
2050 | return Attrs().UseLocking(x); |
2051 | } |
2052 | |
2053 | Operation operation; |
2054 | }; |
2055 | |
2056 | /// Update relevant entries in '*var' and '*accum' according to the adagrad scheme. |
2057 | /// |
/// That is, for rows for which we have grad, we update var and accum as follows:
2059 | /// accum += grad * grad |
2060 | /// var -= lr * grad * (1 / sqrt(accum)) |
2061 | /// |
2062 | /// Args: |
2063 | /// * scope: A Scope object |
2064 | /// * var: Should be from a Variable(). |
2065 | /// * accum: Should be from a Variable(). |
2066 | /// * lr: Learning rate. Must be a scalar. |
2067 | /// * grad: The gradient. |
2068 | /// * indices: A vector of indices into the first dimension of var and accum. |
2069 | /// |
2070 | /// Optional attributes (see `Attrs`): |
2071 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
2072 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2073 | /// contention. |
2074 | /// |
2075 | /// Returns: |
2076 | /// * the created `Operation` |
2077 | class ResourceSparseApplyAdagrad { |
2078 | public: |
2079 | /// Optional attribute setters for ResourceSparseApplyAdagrad |
2080 | struct Attrs { |
2081 | /// If `True`, updating of the var and accum tensors will be protected |
2082 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2083 | /// contention. |
2084 | /// |
2085 | /// Defaults to false |
2086 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2087 | Attrs ret = *this; |
2088 | ret.use_locking_ = x; |
2089 | return ret; |
2090 | } |
2091 | |
2092 | /// Defaults to true |
2093 | TF_MUST_USE_RESULT Attrs UpdateSlots(bool x) { |
2094 | Attrs ret = *this; |
2095 | ret.update_slots_ = x; |
2096 | return ret; |
2097 | } |
2098 | |
2099 | bool use_locking_ = false; |
2100 | bool update_slots_ = true; |
2101 | }; |
2102 | ResourceSparseApplyAdagrad(const ::tensorflow::Scope& scope, |
2103 | ::tensorflow::Input var, ::tensorflow::Input accum, |
2104 | ::tensorflow::Input lr, ::tensorflow::Input grad, |
2105 | ::tensorflow::Input indices); |
2106 | ResourceSparseApplyAdagrad(const ::tensorflow::Scope& scope, |
2107 | ::tensorflow::Input var, ::tensorflow::Input accum, |
2108 | ::tensorflow::Input lr, ::tensorflow::Input grad, |
2109 | ::tensorflow::Input indices, const |
2110 | ResourceSparseApplyAdagrad::Attrs& attrs); |
2111 | operator ::tensorflow::Operation() const { return operation; } |
2112 | |
2113 | static Attrs UseLocking(bool x) { |
2114 | return Attrs().UseLocking(x); |
2115 | } |
2116 | static Attrs UpdateSlots(bool x) { |
2117 | return Attrs().UpdateSlots(x); |
2118 | } |
2119 | |
2120 | Operation operation; |
2121 | }; |
2122 | |
2123 | /// Update entries in '*var' and '*accum' according to the proximal adagrad scheme. |
2124 | /// |
2125 | /// Args: |
2126 | /// * scope: A Scope object |
2127 | /// * var: Should be from a Variable(). |
2128 | /// * gradient_accumulator: Should be from a Variable(). |
2129 | /// * gradient_squared_accumulator: Should be from a Variable(). |
2130 | /// * grad: The gradient. |
2131 | /// * indices: A vector of indices into the first dimension of var and accum. |
2132 | /// * lr: Learning rate. Must be a scalar. |
2133 | /// * l1: L1 regularization. Must be a scalar. |
2134 | /// * l2: L2 regularization. Must be a scalar. |
2135 | /// * global_step: Training step number. Must be a scalar. |
2136 | /// |
2137 | /// Optional attributes (see `Attrs`): |
2138 | /// * use_locking: If True, updating of the var and accum tensors will be protected by |
2139 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
2140 | /// |
2141 | /// Returns: |
2142 | /// * the created `Operation` |
2143 | class ResourceSparseApplyAdagradDA { |
2144 | public: |
2145 | /// Optional attribute setters for ResourceSparseApplyAdagradDA |
2146 | struct Attrs { |
2147 | /// If True, updating of the var and accum tensors will be protected by |
2148 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
2149 | /// |
2150 | /// Defaults to false |
2151 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2152 | Attrs ret = *this; |
2153 | ret.use_locking_ = x; |
2154 | return ret; |
2155 | } |
2156 | |
2157 | bool use_locking_ = false; |
2158 | }; |
2159 | ResourceSparseApplyAdagradDA(const ::tensorflow::Scope& scope, |
2160 | ::tensorflow::Input var, ::tensorflow::Input |
2161 | gradient_accumulator, ::tensorflow::Input |
2162 | gradient_squared_accumulator, ::tensorflow::Input |
2163 | grad, ::tensorflow::Input indices, |
2164 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
2165 | ::tensorflow::Input l2, ::tensorflow::Input |
2166 | global_step); |
2167 | ResourceSparseApplyAdagradDA(const ::tensorflow::Scope& scope, |
2168 | ::tensorflow::Input var, ::tensorflow::Input |
2169 | gradient_accumulator, ::tensorflow::Input |
2170 | gradient_squared_accumulator, ::tensorflow::Input |
2171 | grad, ::tensorflow::Input indices, |
2172 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
2173 | ::tensorflow::Input l2, ::tensorflow::Input |
2174 | global_step, const |
2175 | ResourceSparseApplyAdagradDA::Attrs& attrs); |
2176 | operator ::tensorflow::Operation() const { return operation; } |
2177 | |
2178 | static Attrs UseLocking(bool x) { |
2179 | return Attrs().UseLocking(x); |
2180 | } |
2181 | |
2182 | Operation operation; |
2183 | }; |
2184 | |
2185 | /// Update '*var' according to the centered RMSProp algorithm. |
2186 | /// |
2187 | /// The centered RMSProp algorithm uses an estimate of the centered second moment |
2188 | /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which |
2189 | /// uses the (uncentered) second moment. This often helps with training, but is |
2190 | /// slightly more expensive in terms of computation and memory. |
2191 | /// |
/// Note that in a dense implementation of this algorithm, mg, ms, and mom will
2193 | /// update even if the grad is zero, but in this sparse implementation, mg, ms, |
2194 | /// and mom will not update in iterations during which the grad is zero. |
2195 | /// |
2196 | /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 |
2197 | /// mean_grad = decay * mean_grad + (1-decay) * gradient |
2198 | /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) |
2199 | /// |
/// mg <- rho * mg_{t-1} + (1-rho) * grad
/// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
/// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
/// var <- var - mom
2203 | /// |
2204 | /// Args: |
2205 | /// * scope: A Scope object |
2206 | /// * var: Should be from a Variable(). |
2207 | /// * mg: Should be from a Variable(). |
2208 | /// * ms: Should be from a Variable(). |
2209 | /// * mom: Should be from a Variable(). |
2210 | /// * lr: Scaling factor. Must be a scalar. |
2211 | /// * rho: Decay rate. Must be a scalar. |
2212 | /// * epsilon: Ridge term. Must be a scalar. |
2213 | /// * grad: The gradient. |
2214 | /// * indices: A vector of indices into the first dimension of var, ms and mom. |
2215 | /// |
2216 | /// Optional attributes (see `Attrs`): |
2217 | /// * use_locking: If `True`, updating of the var, mg, ms, and mom tensors is |
2218 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
2219 | /// contention. |
2220 | /// |
2221 | /// Returns: |
2222 | /// * the created `Operation` |
2223 | class ResourceSparseApplyCenteredRMSProp { |
2224 | public: |
2225 | /// Optional attribute setters for ResourceSparseApplyCenteredRMSProp |
2226 | struct Attrs { |
2227 | /// If `True`, updating of the var, mg, ms, and mom tensors is |
2228 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
2229 | /// contention. |
2230 | /// |
2231 | /// Defaults to false |
2232 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2233 | Attrs ret = *this; |
2234 | ret.use_locking_ = x; |
2235 | return ret; |
2236 | } |
2237 | |
2238 | bool use_locking_ = false; |
2239 | }; |
2240 | ResourceSparseApplyCenteredRMSProp(const ::tensorflow::Scope& scope, |
2241 | ::tensorflow::Input var, ::tensorflow::Input |
2242 | mg, ::tensorflow::Input ms, |
2243 | ::tensorflow::Input mom, ::tensorflow::Input |
2244 | lr, ::tensorflow::Input rho, |
2245 | ::tensorflow::Input momentum, |
2246 | ::tensorflow::Input epsilon, |
2247 | ::tensorflow::Input grad, |
2248 | ::tensorflow::Input indices); |
2249 | ResourceSparseApplyCenteredRMSProp(const ::tensorflow::Scope& scope, |
2250 | ::tensorflow::Input var, ::tensorflow::Input |
2251 | mg, ::tensorflow::Input ms, |
2252 | ::tensorflow::Input mom, ::tensorflow::Input |
2253 | lr, ::tensorflow::Input rho, |
2254 | ::tensorflow::Input momentum, |
2255 | ::tensorflow::Input epsilon, |
2256 | ::tensorflow::Input grad, |
2257 | ::tensorflow::Input indices, const |
2258 | ResourceSparseApplyCenteredRMSProp::Attrs& |
2259 | attrs); |
2260 | operator ::tensorflow::Operation() const { return operation; } |
2261 | |
2262 | static Attrs UseLocking(bool x) { |
2263 | return Attrs().UseLocking(x); |
2264 | } |
2265 | |
2266 | Operation operation; |
2267 | }; |
2268 | |
2269 | /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. |
2270 | /// |
/// That is, for rows for which we have grad, we update var, accum and linear as follows:
2272 | /// accum_new = accum + grad * grad |
2273 | /// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var |
2274 | /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 |
2275 | /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 |
2276 | /// accum = accum_new |
2277 | /// |
2278 | /// Args: |
2279 | /// * scope: A Scope object |
2280 | /// * var: Should be from a Variable(). |
2281 | /// * accum: Should be from a Variable(). |
2282 | /// * linear: Should be from a Variable(). |
2283 | /// * grad: The gradient. |
2284 | /// * indices: A vector of indices into the first dimension of var and accum. |
2285 | /// * lr: Scaling factor. Must be a scalar. |
2286 | /// * l1: L1 regularization. Must be a scalar. |
2287 | /// * l2: L2 regularization. Must be a scalar. |
2288 | /// * lr_power: Scaling factor. Must be a scalar. |
2289 | /// |
2290 | /// Optional attributes (see `Attrs`): |
2291 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
2292 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2293 | /// contention. |
2294 | /// |
2295 | /// Returns: |
2296 | /// * the created `Operation` |
2297 | class ResourceSparseApplyFtrl { |
2298 | public: |
2299 | /// Optional attribute setters for ResourceSparseApplyFtrl |
2300 | struct Attrs { |
2301 | /// If `True`, updating of the var and accum tensors will be protected |
2302 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2303 | /// contention. |
2304 | /// |
2305 | /// Defaults to false |
2306 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2307 | Attrs ret = *this; |
2308 | ret.use_locking_ = x; |
2309 | return ret; |
2310 | } |
2311 | |
2312 | /// Defaults to false |
2313 | TF_MUST_USE_RESULT Attrs MultiplyLinearByLr(bool x) { |
2314 | Attrs ret = *this; |
2315 | ret.multiply_linear_by_lr_ = x; |
2316 | return ret; |
2317 | } |
2318 | |
2319 | bool use_locking_ = false; |
2320 | bool multiply_linear_by_lr_ = false; |
2321 | }; |
2322 | ResourceSparseApplyFtrl(const ::tensorflow::Scope& scope, ::tensorflow::Input |
2323 | var, ::tensorflow::Input accum, ::tensorflow::Input |
2324 | linear, ::tensorflow::Input grad, ::tensorflow::Input |
2325 | indices, ::tensorflow::Input lr, ::tensorflow::Input |
2326 | l1, ::tensorflow::Input l2, ::tensorflow::Input |
2327 | lr_power); |
2328 | ResourceSparseApplyFtrl(const ::tensorflow::Scope& scope, ::tensorflow::Input |
2329 | var, ::tensorflow::Input accum, ::tensorflow::Input |
2330 | linear, ::tensorflow::Input grad, ::tensorflow::Input |
2331 | indices, ::tensorflow::Input lr, ::tensorflow::Input |
2332 | l1, ::tensorflow::Input l2, ::tensorflow::Input |
2333 | lr_power, const ResourceSparseApplyFtrl::Attrs& attrs); |
2334 | operator ::tensorflow::Operation() const { return operation; } |
2335 | |
2336 | static Attrs UseLocking(bool x) { |
2337 | return Attrs().UseLocking(x); |
2338 | } |
2339 | static Attrs MultiplyLinearByLr(bool x) { |
2340 | return Attrs().MultiplyLinearByLr(x); |
2341 | } |
2342 | |
2343 | Operation operation; |
2344 | }; |
2345 | |
2346 | /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. |
2347 | /// |
/// That is, for rows for which we have grad, we update var, accum and linear as follows:
2349 | /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var |
2350 | /// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage |
2351 | /// linear += grad_with_shrinkage + |
2352 | /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var |
2353 | /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 |
2354 | /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 |
2355 | /// accum = accum_new |
2356 | /// |
2357 | /// Args: |
2358 | /// * scope: A Scope object |
2359 | /// * var: Should be from a Variable(). |
2360 | /// * accum: Should be from a Variable(). |
2361 | /// * linear: Should be from a Variable(). |
2362 | /// * grad: The gradient. |
2363 | /// * indices: A vector of indices into the first dimension of var and accum. |
2364 | /// * lr: Scaling factor. Must be a scalar. |
2365 | /// * l1: L1 regularization. Must be a scalar. |
2366 | /// * l2: L2 shrinkage regularization. Must be a scalar. |
2367 | /// * lr_power: Scaling factor. Must be a scalar. |
2368 | /// |
2369 | /// Optional attributes (see `Attrs`): |
2370 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
2371 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2372 | /// contention. |
2373 | /// |
2374 | /// Returns: |
2375 | /// * the created `Operation` |
2376 | class ResourceSparseApplyFtrlV2 { |
2377 | public: |
2378 | /// Optional attribute setters for ResourceSparseApplyFtrlV2 |
2379 | struct Attrs { |
2380 | /// If `True`, updating of the var and accum tensors will be protected |
2381 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2382 | /// contention. |
2383 | /// |
2384 | /// Defaults to false |
2385 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2386 | Attrs ret = *this; |
2387 | ret.use_locking_ = x; |
2388 | return ret; |
2389 | } |
2390 | |
2391 | /// Defaults to false |
2392 | TF_MUST_USE_RESULT Attrs MultiplyLinearByLr(bool x) { |
2393 | Attrs ret = *this; |
2394 | ret.multiply_linear_by_lr_ = x; |
2395 | return ret; |
2396 | } |
2397 | |
2398 | bool use_locking_ = false; |
2399 | bool multiply_linear_by_lr_ = false; |
2400 | }; |
2401 | ResourceSparseApplyFtrlV2(const ::tensorflow::Scope& scope, ::tensorflow::Input |
2402 | var, ::tensorflow::Input accum, ::tensorflow::Input |
2403 | linear, ::tensorflow::Input grad, ::tensorflow::Input |
2404 | indices, ::tensorflow::Input lr, ::tensorflow::Input |
2405 | l1, ::tensorflow::Input l2, ::tensorflow::Input |
2406 | l2_shrinkage, ::tensorflow::Input lr_power); |
2407 | ResourceSparseApplyFtrlV2(const ::tensorflow::Scope& scope, ::tensorflow::Input |
2408 | var, ::tensorflow::Input accum, ::tensorflow::Input |
2409 | linear, ::tensorflow::Input grad, ::tensorflow::Input |
2410 | indices, ::tensorflow::Input lr, ::tensorflow::Input |
2411 | l1, ::tensorflow::Input l2, ::tensorflow::Input |
2412 | l2_shrinkage, ::tensorflow::Input lr_power, const |
2413 | ResourceSparseApplyFtrlV2::Attrs& attrs); |
2414 | operator ::tensorflow::Operation() const { return operation; } |
2415 | |
2416 | static Attrs UseLocking(bool x) { |
2417 | return Attrs().UseLocking(x); |
2418 | } |
2419 | static Attrs MultiplyLinearByLr(bool x) { |
2420 | return Attrs().MultiplyLinearByLr(x); |
2421 | } |
2422 | |
2423 | Operation operation; |
2424 | }; |
2425 | |
2426 | /// Update relevant entries in '*var' and '*accum' according to the momentum scheme. |
2427 | /// |
2428 | /// Set use_nesterov = True if you want to use Nesterov momentum. |
2429 | /// |
/// That is, for rows for which we have grad, we update var and accum as follows:
2431 | /// |
2432 | /// accum = accum * momentum - lr * grad |
2433 | /// var += accum |
2434 | /// |
2435 | /// Args: |
2436 | /// * scope: A Scope object |
2437 | /// * var: Should be from a Variable(). |
2438 | /// * accum: Should be from a Variable(). |
2439 | /// * lr: Learning rate. Must be a scalar. |
2440 | /// * grad: The gradient. |
2441 | /// * indices: A vector of indices into the first dimension of var and accum. |
2442 | /// * momentum: Momentum. Must be a scalar. |
2443 | /// |
2444 | /// Optional attributes (see `Attrs`): |
2445 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
2446 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2447 | /// contention. |
2448 | /// * use_nesterov: If `True`, the tensor passed to compute grad will be |
2449 | /// var + momentum * accum, so in the end, the var you get is actually |
2450 | /// var + momentum * accum. |
2451 | /// |
2452 | /// Returns: |
2453 | /// * the created `Operation` |
2454 | class ResourceSparseApplyKerasMomentum { |
2455 | public: |
2456 | /// Optional attribute setters for ResourceSparseApplyKerasMomentum |
2457 | struct Attrs { |
2458 | /// If `True`, updating of the var and accum tensors will be protected |
2459 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2460 | /// contention. |
2461 | /// |
2462 | /// Defaults to false |
2463 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2464 | Attrs ret = *this; |
2465 | ret.use_locking_ = x; |
2466 | return ret; |
2467 | } |
2468 | |
2469 | /// If `True`, the tensor passed to compute grad will be |
2470 | /// var + momentum * accum, so in the end, the var you get is actually |
2471 | /// var + momentum * accum. |
2472 | /// |
2473 | /// Defaults to false |
2474 | TF_MUST_USE_RESULT Attrs UseNesterov(bool x) { |
2475 | Attrs ret = *this; |
2476 | ret.use_nesterov_ = x; |
2477 | return ret; |
2478 | } |
2479 | |
2480 | bool use_locking_ = false; |
2481 | bool use_nesterov_ = false; |
2482 | }; |
2483 | ResourceSparseApplyKerasMomentum(const ::tensorflow::Scope& scope, |
2484 | ::tensorflow::Input var, ::tensorflow::Input |
2485 | accum, ::tensorflow::Input lr, |
2486 | ::tensorflow::Input grad, ::tensorflow::Input |
2487 | indices, ::tensorflow::Input momentum); |
2488 | ResourceSparseApplyKerasMomentum(const ::tensorflow::Scope& scope, |
2489 | ::tensorflow::Input var, ::tensorflow::Input |
2490 | accum, ::tensorflow::Input lr, |
2491 | ::tensorflow::Input grad, ::tensorflow::Input |
2492 | indices, ::tensorflow::Input momentum, const |
2493 | ResourceSparseApplyKerasMomentum::Attrs& |
2494 | attrs); |
2495 | operator ::tensorflow::Operation() const { return operation; } |
2496 | |
2497 | static Attrs UseLocking(bool x) { |
2498 | return Attrs().UseLocking(x); |
2499 | } |
2500 | static Attrs UseNesterov(bool x) { |
2501 | return Attrs().UseNesterov(x); |
2502 | } |
2503 | |
2504 | Operation operation; |
2505 | }; |
2506 | |
2507 | /// Update relevant entries in '*var' and '*accum' according to the momentum scheme. |
2508 | /// |
2509 | /// Set use_nesterov = True if you want to use Nesterov momentum. |
2510 | /// |
/// That is, for rows for which we have grad, we update var and accum as follows:
2512 | /// |
2513 | /// accum = accum * momentum + grad |
2514 | /// var -= lr * accum |
2515 | /// |
2516 | /// Args: |
2517 | /// * scope: A Scope object |
2518 | /// * var: Should be from a Variable(). |
2519 | /// * accum: Should be from a Variable(). |
2520 | /// * lr: Learning rate. Must be a scalar. |
2521 | /// * grad: The gradient. |
2522 | /// * indices: A vector of indices into the first dimension of var and accum. |
2523 | /// * momentum: Momentum. Must be a scalar. |
2524 | /// |
2525 | /// Optional attributes (see `Attrs`): |
2526 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
2527 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2528 | /// contention. |
2529 | /// * use_nesterov: If `True`, the tensor passed to compute grad will be |
2530 | /// var - lr * momentum * accum, so in the end, the var you get is actually |
2531 | /// var - lr * momentum * accum. |
2532 | /// |
2533 | /// Returns: |
2534 | /// * the created `Operation` |
2535 | class ResourceSparseApplyMomentum { |
2536 | public: |
2537 | /// Optional attribute setters for ResourceSparseApplyMomentum |
2538 | struct Attrs { |
2539 | /// If `True`, updating of the var and accum tensors will be protected |
2540 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2541 | /// contention. |
2542 | /// |
2543 | /// Defaults to false |
2544 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2545 | Attrs ret = *this; |
2546 | ret.use_locking_ = x; |
2547 | return ret; |
2548 | } |
2549 | |
2550 | /// If `True`, the tensor passed to compute grad will be |
2551 | /// var - lr * momentum * accum, so in the end, the var you get is actually |
2552 | /// var - lr * momentum * accum. |
2553 | /// |
2554 | /// Defaults to false |
2555 | TF_MUST_USE_RESULT Attrs UseNesterov(bool x) { |
2556 | Attrs ret = *this; |
2557 | ret.use_nesterov_ = x; |
2558 | return ret; |
2559 | } |
2560 | |
2561 | bool use_locking_ = false; |
2562 | bool use_nesterov_ = false; |
2563 | }; |
2564 | ResourceSparseApplyMomentum(const ::tensorflow::Scope& scope, |
2565 | ::tensorflow::Input var, ::tensorflow::Input accum, |
2566 | ::tensorflow::Input lr, ::tensorflow::Input grad, |
2567 | ::tensorflow::Input indices, ::tensorflow::Input |
2568 | momentum); |
2569 | ResourceSparseApplyMomentum(const ::tensorflow::Scope& scope, |
2570 | ::tensorflow::Input var, ::tensorflow::Input accum, |
2571 | ::tensorflow::Input lr, ::tensorflow::Input grad, |
2572 | ::tensorflow::Input indices, ::tensorflow::Input |
2573 | momentum, const ResourceSparseApplyMomentum::Attrs& |
2574 | attrs); |
2575 | operator ::tensorflow::Operation() const { return operation; } |
2576 | |
2577 | static Attrs UseLocking(bool x) { |
2578 | return Attrs().UseLocking(x); |
2579 | } |
2580 | static Attrs UseNesterov(bool x) { |
2581 | return Attrs().UseNesterov(x); |
2582 | } |
2583 | |
2584 | Operation operation; |
2585 | }; |
2586 | |
2587 | /// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. |
2588 | /// |
/// That is, for rows for which we have grad, we update var and accum as follows:
2590 | /// accum += grad * grad |
2591 | /// prox_v = var |
2592 | /// prox_v -= lr * grad * (1 / sqrt(accum)) |
2593 | /// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} |
2594 | /// |
2595 | /// Args: |
2596 | /// * scope: A Scope object |
2597 | /// * var: Should be from a Variable(). |
2598 | /// * accum: Should be from a Variable(). |
2599 | /// * lr: Learning rate. Must be a scalar. |
2600 | /// * l1: L1 regularization. Must be a scalar. |
2601 | /// * l2: L2 regularization. Must be a scalar. |
2602 | /// * grad: The gradient. |
2603 | /// * indices: A vector of indices into the first dimension of var and accum. |
2604 | /// |
2605 | /// Optional attributes (see `Attrs`): |
2606 | /// * use_locking: If True, updating of the var and accum tensors will be protected by |
2607 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
2608 | /// |
2609 | /// Returns: |
2610 | /// * the created `Operation` |
2611 | class ResourceSparseApplyProximalAdagrad { |
2612 | public: |
2613 | /// Optional attribute setters for ResourceSparseApplyProximalAdagrad |
2614 | struct Attrs { |
2615 | /// If True, updating of the var and accum tensors will be protected by |
2616 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
2617 | /// |
2618 | /// Defaults to false |
2619 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2620 | Attrs ret = *this; |
2621 | ret.use_locking_ = x; |
2622 | return ret; |
2623 | } |
2624 | |
2625 | bool use_locking_ = false; |
2626 | }; |
2627 | ResourceSparseApplyProximalAdagrad(const ::tensorflow::Scope& scope, |
2628 | ::tensorflow::Input var, ::tensorflow::Input |
2629 | accum, ::tensorflow::Input lr, |
2630 | ::tensorflow::Input l1, ::tensorflow::Input |
2631 | l2, ::tensorflow::Input grad, |
2632 | ::tensorflow::Input indices); |
2633 | ResourceSparseApplyProximalAdagrad(const ::tensorflow::Scope& scope, |
2634 | ::tensorflow::Input var, ::tensorflow::Input |
2635 | accum, ::tensorflow::Input lr, |
2636 | ::tensorflow::Input l1, ::tensorflow::Input |
2637 | l2, ::tensorflow::Input grad, |
2638 | ::tensorflow::Input indices, const |
2639 | ResourceSparseApplyProximalAdagrad::Attrs& |
2640 | attrs); |
2641 | operator ::tensorflow::Operation() const { return operation; } |
2642 | |
2643 | static Attrs UseLocking(bool x) { |
2644 | return Attrs().UseLocking(x); |
2645 | } |
2646 | |
2647 | Operation operation; |
2648 | }; |
2649 | |
2650 | /// Sparse update '*var' as FOBOS algorithm with fixed learning rate. |
2651 | /// |
/// That is, for rows for which we have grad, we update var as follows:
2653 | /// prox_v = var - alpha * grad |
2654 | /// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} |
2655 | /// |
2656 | /// Args: |
2657 | /// * scope: A Scope object |
2658 | /// * var: Should be from a Variable(). |
2659 | /// * alpha: Scaling factor. Must be a scalar. |
2660 | /// * l1: L1 regularization. Must be a scalar. |
2661 | /// * l2: L2 regularization. Must be a scalar. |
2662 | /// * grad: The gradient. |
2663 | /// * indices: A vector of indices into the first dimension of var and accum. |
2664 | /// |
2665 | /// Optional attributes (see `Attrs`): |
2666 | /// * use_locking: If True, the subtraction will be protected by a lock; |
2667 | /// otherwise the behavior is undefined, but may exhibit less contention. |
2668 | /// |
2669 | /// Returns: |
2670 | /// * the created `Operation` |
2671 | class ResourceSparseApplyProximalGradientDescent { |
2672 | public: |
2673 | /// Optional attribute setters for ResourceSparseApplyProximalGradientDescent |
2674 | struct Attrs { |
2675 | /// If True, the subtraction will be protected by a lock; |
2676 | /// otherwise the behavior is undefined, but may exhibit less contention. |
2677 | /// |
2678 | /// Defaults to false |
2679 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2680 | Attrs ret = *this; |
2681 | ret.use_locking_ = x; |
2682 | return ret; |
2683 | } |
2684 | |
2685 | bool use_locking_ = false; |
2686 | }; |
2687 | ResourceSparseApplyProximalGradientDescent(const ::tensorflow::Scope& scope, |
2688 | ::tensorflow::Input var, |
2689 | ::tensorflow::Input alpha, |
2690 | ::tensorflow::Input l1, |
2691 | ::tensorflow::Input l2, |
2692 | ::tensorflow::Input grad, |
2693 | ::tensorflow::Input indices); |
2694 | ResourceSparseApplyProximalGradientDescent(const ::tensorflow::Scope& scope, |
2695 | ::tensorflow::Input var, |
2696 | ::tensorflow::Input alpha, |
2697 | ::tensorflow::Input l1, |
2698 | ::tensorflow::Input l2, |
2699 | ::tensorflow::Input grad, |
2700 | ::tensorflow::Input indices, const |
2701 | ResourceSparseApplyProximalGradientDescent::Attrs& |
2702 | attrs); |
2703 | operator ::tensorflow::Operation() const { return operation; } |
2704 | |
2705 | static Attrs UseLocking(bool x) { |
2706 | return Attrs().UseLocking(x); |
2707 | } |
2708 | |
2709 | Operation operation; |
2710 | }; |
2711 | |
2712 | /// Update '*var' according to the RMSProp algorithm. |
2713 | /// |
/// Note that in a dense implementation of this algorithm, ms and mom will
2715 | /// update even if the grad is zero, but in this sparse implementation, ms |
2716 | /// and mom will not update in iterations during which the grad is zero. |
2717 | /// |
2718 | /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 |
2719 | /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) |
2720 | /// |
2721 | /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad |
2722 | /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) |
2723 | /// var <- var - mom |
2724 | /// |
2725 | /// Args: |
2726 | /// * scope: A Scope object |
2727 | /// * var: Should be from a Variable(). |
2728 | /// * ms: Should be from a Variable(). |
2729 | /// * mom: Should be from a Variable(). |
2730 | /// * lr: Scaling factor. Must be a scalar. |
2731 | /// * rho: Decay rate. Must be a scalar. |
2732 | /// * epsilon: Ridge term. Must be a scalar. |
2733 | /// * grad: The gradient. |
2734 | /// * indices: A vector of indices into the first dimension of var, ms and mom. |
2735 | /// |
2736 | /// Optional attributes (see `Attrs`): |
2737 | /// * use_locking: If `True`, updating of the var, ms, and mom tensors is protected |
2738 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2739 | /// contention. |
2740 | /// |
2741 | /// Returns: |
2742 | /// * the created `Operation` |
2743 | class ResourceSparseApplyRMSProp { |
2744 | public: |
2745 | /// Optional attribute setters for ResourceSparseApplyRMSProp |
2746 | struct Attrs { |
2747 | /// If `True`, updating of the var, ms, and mom tensors is protected |
2748 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2749 | /// contention. |
2750 | /// |
2751 | /// Defaults to false |
2752 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2753 | Attrs ret = *this; |
2754 | ret.use_locking_ = x; |
2755 | return ret; |
2756 | } |
2757 | |
2758 | bool use_locking_ = false; |
2759 | }; |
2760 | ResourceSparseApplyRMSProp(const ::tensorflow::Scope& scope, |
2761 | ::tensorflow::Input var, ::tensorflow::Input ms, |
2762 | ::tensorflow::Input mom, ::tensorflow::Input lr, |
2763 | ::tensorflow::Input rho, ::tensorflow::Input |
2764 | momentum, ::tensorflow::Input epsilon, |
2765 | ::tensorflow::Input grad, ::tensorflow::Input |
2766 | indices); |
2767 | ResourceSparseApplyRMSProp(const ::tensorflow::Scope& scope, |
2768 | ::tensorflow::Input var, ::tensorflow::Input ms, |
2769 | ::tensorflow::Input mom, ::tensorflow::Input lr, |
2770 | ::tensorflow::Input rho, ::tensorflow::Input |
2771 | momentum, ::tensorflow::Input epsilon, |
2772 | ::tensorflow::Input grad, ::tensorflow::Input |
2773 | indices, const ResourceSparseApplyRMSProp::Attrs& |
2774 | attrs); |
2775 | operator ::tensorflow::Operation() const { return operation; } |
2776 | |
2777 | static Attrs UseLocking(bool x) { |
2778 | return Attrs().UseLocking(x); |
2779 | } |
2780 | |
2781 | Operation operation; |
2782 | }; |
2783 | |
/// Update relevant entries in '*var' and '*accum' according to the adadelta scheme.
///
/// Args:
/// * scope: A Scope object
/// * var: Should be from a Variable().
/// * accum: Should be from a Variable().
/// * accum_update: Should be from a Variable().
2790 | /// * lr: Learning rate. Must be a scalar. |
2791 | /// * rho: Decay factor. Must be a scalar. |
2792 | /// * epsilon: Constant factor. Must be a scalar. |
2793 | /// * grad: The gradient. |
2794 | /// * indices: A vector of indices into the first dimension of var and accum. |
2795 | /// |
2796 | /// Optional attributes (see `Attrs`): |
2797 | /// * use_locking: If True, updating of the var and accum tensors will be protected by |
2798 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
2799 | /// |
2800 | /// Returns: |
2801 | /// * `Output`: Same as "var". |
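///
/// Example (a minimal sketch, not part of the generated op definition; it
/// assumes `using namespace tensorflow; using namespace tensorflow::ops;`, the
/// ref-variable helpers Variable/Assign and the Const op from
/// tensorflow/cc/ops/standard_ops.h, and ClientSession from
/// tensorflow/cc/client/client_session.h; shapes and values are arbitrary):
///
///     Scope root = Scope::NewRootScope();
///     auto var = Variable(root, {4, 3}, DT_FLOAT);
///     auto accum = Variable(root, {4, 3}, DT_FLOAT);
///     auto accum_update = Variable(root, {4, 3}, DT_FLOAT);
///     auto init_var = Assign(root, var, Const(root, 1.0f, {4, 3}));
///     auto init_accum = Assign(root, accum, Const(root, 0.0f, {4, 3}));
///     auto init_accum_update = Assign(root, accum_update, Const(root, 0.0f, {4, 3}));
///     // Gradient rows for indices 0 and 2 only.
///     auto grad = Const(root, {{0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}});
///     auto indices = Const(root, {0, 2});
///     auto update = SparseApplyAdadelta(root, var, accum, accum_update,
///                                       /*lr=*/1.0f, /*rho=*/0.95f,
///                                       /*epsilon=*/1e-6f, grad, indices);
///     ClientSession session(root);
///     TF_CHECK_OK(session.Run({init_var, init_accum, init_accum_update}, nullptr));
///     std::vector<Tensor> out;
///     TF_CHECK_OK(session.Run({update}, &out));  // out[0] is the updated var.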
2802 | class SparseApplyAdadelta { |
2803 | public: |
2804 | /// Optional attribute setters for SparseApplyAdadelta |
2805 | struct Attrs { |
2806 | /// If True, updating of the var and accum tensors will be protected by |
2807 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
2808 | /// |
2809 | /// Defaults to false |
2810 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2811 | Attrs ret = *this; |
2812 | ret.use_locking_ = x; |
2813 | return ret; |
2814 | } |
2815 | |
2816 | bool use_locking_ = false; |
2817 | }; |
2818 | SparseApplyAdadelta(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
2819 | ::tensorflow::Input accum, ::tensorflow::Input |
2820 | accum_update, ::tensorflow::Input lr, ::tensorflow::Input |
2821 | rho, ::tensorflow::Input epsilon, ::tensorflow::Input grad, |
2822 | ::tensorflow::Input indices); |
2823 | SparseApplyAdadelta(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
2824 | ::tensorflow::Input accum, ::tensorflow::Input |
2825 | accum_update, ::tensorflow::Input lr, ::tensorflow::Input |
2826 | rho, ::tensorflow::Input epsilon, ::tensorflow::Input grad, |
2827 | ::tensorflow::Input indices, const |
2828 | SparseApplyAdadelta::Attrs& attrs); |
2829 | operator ::tensorflow::Output() const { return out; } |
2830 | operator ::tensorflow::Input() const { return out; } |
2831 | ::tensorflow::Node* node() const { return out.node(); } |
2832 | |
2833 | static Attrs UseLocking(bool x) { |
2834 | return Attrs().UseLocking(x); |
2835 | } |
2836 | |
2837 | Operation operation; |
2838 | ::tensorflow::Output out; |
2839 | }; |
2840 | |
2841 | /// Update relevant entries in '*var' and '*accum' according to the adagrad scheme. |
2842 | /// |
/// That is, for rows for which we have grad, we update var and accum as follows:
2844 | /// $$accum += grad * grad$$ |
2845 | /// $$var -= lr * grad * (1 / sqrt(accum))$$ |
2846 | /// |
2847 | /// Args: |
2848 | /// * scope: A Scope object |
2849 | /// * var: Should be from a Variable(). |
2850 | /// * accum: Should be from a Variable(). |
2851 | /// * lr: Learning rate. Must be a scalar. |
2852 | /// * grad: The gradient. |
2853 | /// * indices: A vector of indices into the first dimension of var and accum. |
2854 | /// |
2855 | /// Optional attributes (see `Attrs`): |
2856 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
2857 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2858 | /// contention. |
2859 | /// |
2860 | /// Returns: |
2861 | /// * `Output`: Same as "var". |
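///
/// Example (a minimal graph-construction sketch under the same assumptions as
/// the SparseApplyAdadelta example above; the optional attr is set through the
/// static Attrs helper):
///
///     Scope root = Scope::NewRootScope();
///     auto var = Variable(root, {4, 3}, DT_FLOAT);
///     auto accum = Variable(root, {4, 3}, DT_FLOAT);
///     auto grad = Const(root, {{0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}});
///     auto indices = Const(root, {0, 2});
///     // update_slots = false leaves accum untouched and only updates var.
///     auto update = SparseApplyAdagrad(root, var, accum, /*lr=*/0.01f, grad,
///                                      indices,
///                                      SparseApplyAdagrad::UpdateSlots(false));
///     // Initialize var and accum with Assign and run `update` with a
///     // ClientSession as in the SparseApplyAdadelta example.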
2862 | class SparseApplyAdagrad { |
2863 | public: |
2864 | /// Optional attribute setters for SparseApplyAdagrad |
2865 | struct Attrs { |
2866 | /// If `True`, updating of the var and accum tensors will be protected |
2867 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
2868 | /// contention. |
2869 | /// |
2870 | /// Defaults to false |
2871 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2872 | Attrs ret = *this; |
2873 | ret.use_locking_ = x; |
2874 | return ret; |
2875 | } |
2876 | |
2877 | /// Defaults to true |
2878 | TF_MUST_USE_RESULT Attrs UpdateSlots(bool x) { |
2879 | Attrs ret = *this; |
2880 | ret.update_slots_ = x; |
2881 | return ret; |
2882 | } |
2883 | |
2884 | bool use_locking_ = false; |
2885 | bool update_slots_ = true; |
2886 | }; |
2887 | SparseApplyAdagrad(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
2888 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
2889 | ::tensorflow::Input grad, ::tensorflow::Input indices); |
2890 | SparseApplyAdagrad(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
2891 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
2892 | ::tensorflow::Input grad, ::tensorflow::Input indices, const |
2893 | SparseApplyAdagrad::Attrs& attrs); |
2894 | operator ::tensorflow::Output() const { return out; } |
2895 | operator ::tensorflow::Input() const { return out; } |
2896 | ::tensorflow::Node* node() const { return out.node(); } |
2897 | |
2898 | static Attrs UseLocking(bool x) { |
2899 | return Attrs().UseLocking(x); |
2900 | } |
2901 | static Attrs UpdateSlots(bool x) { |
2902 | return Attrs().UpdateSlots(x); |
2903 | } |
2904 | |
2905 | Operation operation; |
2906 | ::tensorflow::Output out; |
2907 | }; |
2908 | |
2909 | /// Update entries in '*var' and '*accum' according to the proximal adagrad scheme. |
2910 | /// |
2911 | /// Args: |
2912 | /// * scope: A Scope object |
2913 | /// * var: Should be from a Variable(). |
2914 | /// * gradient_accumulator: Should be from a Variable(). |
2915 | /// * gradient_squared_accumulator: Should be from a Variable(). |
2916 | /// * grad: The gradient. |
2917 | /// * indices: A vector of indices into the first dimension of var and accum. |
2918 | /// * lr: Learning rate. Must be a scalar. |
2919 | /// * l1: L1 regularization. Must be a scalar. |
2920 | /// * l2: L2 regularization. Must be a scalar. |
2921 | /// * global_step: Training step number. Must be a scalar. |
2922 | /// |
2923 | /// Optional attributes (see `Attrs`): |
2924 | /// * use_locking: If True, updating of the var and accum tensors will be protected by |
2925 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
2926 | /// |
2927 | /// Returns: |
2928 | /// * `Output`: Same as "var". |
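///
/// Example (a minimal graph-construction sketch under the same assumptions as
/// the SparseApplyAdadelta example above; Cast is used only to produce the
/// int64 scalar global_step):
///
///     Scope root = Scope::NewRootScope();
///     auto var = Variable(root, {4, 3}, DT_FLOAT);
///     auto grad_accum = Variable(root, {4, 3}, DT_FLOAT);
///     auto grad_sq_accum = Variable(root, {4, 3}, DT_FLOAT);
///     auto grad = Const(root, {{0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}});
///     auto indices = Const(root, {0, 2});
///     auto global_step = Cast(root, 1, DT_INT64);  // scalar int64 step counter
///     auto update = SparseApplyAdagradDA(root, var, grad_accum, grad_sq_accum,
///                                        grad, indices, /*lr=*/0.01f,
///                                        /*l1=*/0.001f, /*l2=*/0.001f,
///                                        global_step);
///     // Initialize the variables and run `update` as in the
///     // SparseApplyAdadelta example.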
2929 | class SparseApplyAdagradDA { |
2930 | public: |
2931 | /// Optional attribute setters for SparseApplyAdagradDA |
2932 | struct Attrs { |
2933 | /// If True, updating of the var and accum tensors will be protected by |
2934 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
2935 | /// |
2936 | /// Defaults to false |
2937 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
2938 | Attrs ret = *this; |
2939 | ret.use_locking_ = x; |
2940 | return ret; |
2941 | } |
2942 | |
2943 | bool use_locking_ = false; |
2944 | }; |
2945 | SparseApplyAdagradDA(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
2946 | ::tensorflow::Input gradient_accumulator, |
2947 | ::tensorflow::Input gradient_squared_accumulator, |
2948 | ::tensorflow::Input grad, ::tensorflow::Input indices, |
2949 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
2950 | ::tensorflow::Input l2, ::tensorflow::Input global_step); |
2951 | SparseApplyAdagradDA(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
2952 | ::tensorflow::Input gradient_accumulator, |
2953 | ::tensorflow::Input gradient_squared_accumulator, |
2954 | ::tensorflow::Input grad, ::tensorflow::Input indices, |
2955 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
2956 | ::tensorflow::Input l2, ::tensorflow::Input global_step, |
2957 | const SparseApplyAdagradDA::Attrs& attrs); |
2958 | operator ::tensorflow::Output() const { return out; } |
2959 | operator ::tensorflow::Input() const { return out; } |
2960 | ::tensorflow::Node* node() const { return out.node(); } |
2961 | |
2962 | static Attrs UseLocking(bool x) { |
2963 | return Attrs().UseLocking(x); |
2964 | } |
2965 | |
2966 | Operation operation; |
2967 | ::tensorflow::Output out; |
2968 | }; |
2969 | |
2970 | /// Update '*var' according to the centered RMSProp algorithm. |
2971 | /// |
2972 | /// The centered RMSProp algorithm uses an estimate of the centered second moment |
2973 | /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which |
2974 | /// uses the (uncentered) second moment. This often helps with training, but is |
2975 | /// slightly more expensive in terms of computation and memory. |
2976 | /// |
/// Note that in the dense implementation of this algorithm, mg, ms, and mom will
/// update even if the grad is zero, but in this sparse implementation, mg, ms,
/// and mom will not update in iterations during which the grad is zero.
2980 | /// |
2981 | /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 |
2982 | /// mean_grad = decay * mean_grad + (1-decay) * gradient |
2983 | /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) |
2984 | /// |
2985 | /// $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ |
2986 | /// $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ |
2987 | /// $$var <- var - mom$$ |
2988 | /// |
2989 | /// Args: |
2990 | /// * scope: A Scope object |
2991 | /// * var: Should be from a Variable(). |
2992 | /// * mg: Should be from a Variable(). |
2993 | /// * ms: Should be from a Variable(). |
2994 | /// * mom: Should be from a Variable(). |
2995 | /// * lr: Scaling factor. Must be a scalar. |
2996 | /// * rho: Decay rate. Must be a scalar. |
2997 | /// * epsilon: Ridge term. Must be a scalar. |
2998 | /// * grad: The gradient. |
2999 | /// * indices: A vector of indices into the first dimension of var, ms and mom. |
3000 | /// |
3001 | /// Optional attributes (see `Attrs`): |
3002 | /// * use_locking: If `True`, updating of the var, mg, ms, and mom tensors is |
3003 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
3004 | /// contention. |
3005 | /// |
3006 | /// Returns: |
3007 | /// * `Output`: Same as "var". |
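///
/// Example (a minimal graph-construction sketch under the same assumptions as
/// the SparseApplyAdadelta example above; compared with SparseApplyRMSProp it
/// only adds the mean-gradient slot mg):
///
///     Scope root = Scope::NewRootScope();
///     auto var = Variable(root, {4, 3}, DT_FLOAT);
///     auto mg  = Variable(root, {4, 3}, DT_FLOAT);
///     auto ms  = Variable(root, {4, 3}, DT_FLOAT);
///     auto mom = Variable(root, {4, 3}, DT_FLOAT);
///     auto grad = Const(root, {{0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}});
///     auto indices = Const(root, {0, 2});
///     auto update = SparseApplyCenteredRMSProp(root, var, mg, ms, mom,
///                                              /*lr=*/0.01f, /*rho=*/0.9f,
///                                              /*momentum=*/0.0f,
///                                              /*epsilon=*/1e-7f, grad, indices);
///     // Initialize the variables and run `update` as in the
///     // SparseApplyAdadelta example.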
3008 | class SparseApplyCenteredRMSProp { |
3009 | public: |
3010 | /// Optional attribute setters for SparseApplyCenteredRMSProp |
3011 | struct Attrs { |
3012 | /// If `True`, updating of the var, mg, ms, and mom tensors is |
3013 | /// protected by a lock; otherwise the behavior is undefined, but may exhibit less |
3014 | /// contention. |
3015 | /// |
3016 | /// Defaults to false |
3017 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
3018 | Attrs ret = *this; |
3019 | ret.use_locking_ = x; |
3020 | return ret; |
3021 | } |
3022 | |
3023 | bool use_locking_ = false; |
3024 | }; |
3025 | SparseApplyCenteredRMSProp(const ::tensorflow::Scope& scope, |
3026 | ::tensorflow::Input var, ::tensorflow::Input mg, |
3027 | ::tensorflow::Input ms, ::tensorflow::Input mom, |
3028 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
3029 | ::tensorflow::Input momentum, ::tensorflow::Input |
3030 | epsilon, ::tensorflow::Input grad, |
3031 | ::tensorflow::Input indices); |
3032 | SparseApplyCenteredRMSProp(const ::tensorflow::Scope& scope, |
3033 | ::tensorflow::Input var, ::tensorflow::Input mg, |
3034 | ::tensorflow::Input ms, ::tensorflow::Input mom, |
3035 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
3036 | ::tensorflow::Input momentum, ::tensorflow::Input |
3037 | epsilon, ::tensorflow::Input grad, |
3038 | ::tensorflow::Input indices, const |
3039 | SparseApplyCenteredRMSProp::Attrs& attrs); |
3040 | operator ::tensorflow::Output() const { return out; } |
3041 | operator ::tensorflow::Input() const { return out; } |
3042 | ::tensorflow::Node* node() const { return out.node(); } |
3043 | |
3044 | static Attrs UseLocking(bool x) { |
3045 | return Attrs().UseLocking(x); |
3046 | } |
3047 | |
3048 | Operation operation; |
3049 | ::tensorflow::Output out; |
3050 | }; |
3051 | |
3052 | /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. |
3053 | /// |
/// That is, for rows for which we have grad, we update var, accum and linear as follows:
3055 | /// $$accum_new = accum + grad * grad$$ |
/// $$linear += grad - (accum_{new}^{-lr_{power}} - accum^{-lr_{power}}) / lr * var$$
3057 | /// $$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$ |
3058 | /// $$var = (sign(linear) * l1 - linear) / quadratic\ if\ |linear| > l1\ else\ 0.0$$ |
3059 | /// $$accum = accum_{new}$$ |
3060 | /// |
3061 | /// Args: |
3062 | /// * scope: A Scope object |
3063 | /// * var: Should be from a Variable(). |
3064 | /// * accum: Should be from a Variable(). |
3065 | /// * linear: Should be from a Variable(). |
3066 | /// * grad: The gradient. |
3067 | /// * indices: A vector of indices into the first dimension of var and accum. |
3068 | /// * lr: Scaling factor. Must be a scalar. |
3069 | /// * l1: L1 regularization. Must be a scalar. |
3070 | /// * l2: L2 regularization. Must be a scalar. |
3071 | /// * lr_power: Scaling factor. Must be a scalar. |
3072 | /// |
3073 | /// Optional attributes (see `Attrs`): |
3074 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
3075 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
3076 | /// contention. |
3077 | /// |
3078 | /// Returns: |
3079 | /// * `Output`: Same as "var". |
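///
/// Example (a minimal graph-construction sketch under the same assumptions as
/// the SparseApplyAdadelta example above; all FTRL hyperparameters are passed
/// as scalar constants):
///
///     Scope root = Scope::NewRootScope();
///     auto var = Variable(root, {4, 3}, DT_FLOAT);
///     auto accum = Variable(root, {4, 3}, DT_FLOAT);
///     auto linear = Variable(root, {4, 3}, DT_FLOAT);
///     auto grad = Const(root, {{0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}});
///     auto indices = Const(root, {0, 2});
///     auto update = SparseApplyFtrl(root, var, accum, linear, grad, indices,
///                                   /*lr=*/0.01f, /*l1=*/0.001f, /*l2=*/0.001f,
///                                   /*lr_power=*/-0.5f);
///     // Initialize var, accum, and linear and run `update` as in the
///     // SparseApplyAdadelta example.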
3080 | class SparseApplyFtrl { |
3081 | public: |
3082 | /// Optional attribute setters for SparseApplyFtrl |
3083 | struct Attrs { |
3084 | /// If `True`, updating of the var and accum tensors will be protected |
3085 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
3086 | /// contention. |
3087 | /// |
3088 | /// Defaults to false |
3089 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
3090 | Attrs ret = *this; |
3091 | ret.use_locking_ = x; |
3092 | return ret; |
3093 | } |
3094 | |
3095 | /// Defaults to false |
3096 | TF_MUST_USE_RESULT Attrs MultiplyLinearByLr(bool x) { |
3097 | Attrs ret = *this; |
3098 | ret.multiply_linear_by_lr_ = x; |
3099 | return ret; |
3100 | } |
3101 | |
3102 | bool use_locking_ = false; |
3103 | bool multiply_linear_by_lr_ = false; |
3104 | }; |
3105 | SparseApplyFtrl(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
3106 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
3107 | ::tensorflow::Input grad, ::tensorflow::Input indices, |
3108 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
3109 | ::tensorflow::Input l2, ::tensorflow::Input lr_power); |
3110 | SparseApplyFtrl(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
3111 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
3112 | ::tensorflow::Input grad, ::tensorflow::Input indices, |
3113 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
3114 | ::tensorflow::Input l2, ::tensorflow::Input lr_power, const |
3115 | SparseApplyFtrl::Attrs& attrs); |
3116 | operator ::tensorflow::Output() const { return out; } |
3117 | operator ::tensorflow::Input() const { return out; } |
3118 | ::tensorflow::Node* node() const { return out.node(); } |
3119 | |
3120 | static Attrs UseLocking(bool x) { |
3121 | return Attrs().UseLocking(x); |
3122 | } |
3123 | static Attrs MultiplyLinearByLr(bool x) { |
3124 | return Attrs().MultiplyLinearByLr(x); |
3125 | } |
3126 | |
3127 | Operation operation; |
3128 | ::tensorflow::Output out; |
3129 | }; |
3130 | |
3131 | /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. |
3132 | /// |
/// That is, for rows for which we have grad, we update var, accum and linear as follows:
/// $$grad_with_shrinkage = grad + 2 * l2_shrinkage * var$$
/// $$accum_new = accum + grad * grad$$
/// $$linear += grad_with_shrinkage - (accum_{new}^{-lr_{power}} - accum^{-lr_{power}}) / lr * var$$
/// $$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$
/// $$var = (sign(linear) * l1 - linear) / quadratic\ if\ |linear| > l1\ else\ 0.0$$
/// $$accum = accum_{new}$$
3141 | /// |
3142 | /// Args: |
3143 | /// * scope: A Scope object |
3144 | /// * var: Should be from a Variable(). |
3145 | /// * accum: Should be from a Variable(). |
3146 | /// * linear: Should be from a Variable(). |
3147 | /// * grad: The gradient. |
3148 | /// * indices: A vector of indices into the first dimension of var and accum. |
3149 | /// * lr: Scaling factor. Must be a scalar. |
3150 | /// * l1: L1 regularization. Must be a scalar. |
/// * l2: L2 regularization. Must be a scalar.
/// * l2_shrinkage: L2 shrinkage regularization. Must be a scalar.
3152 | /// * lr_power: Scaling factor. Must be a scalar. |
3153 | /// |
3154 | /// Optional attributes (see `Attrs`): |
3155 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
3156 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
3157 | /// contention. |
3158 | /// |
3159 | /// Returns: |
3160 | /// * `Output`: Same as "var". |
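///
/// Example (a minimal graph-construction sketch under the same assumptions as
/// the SparseApplyFtrl example above; the only difference is the extra
/// l2_shrinkage scalar between l2 and lr_power):
///
///     Scope root = Scope::NewRootScope();
///     auto var = Variable(root, {4, 3}, DT_FLOAT);
///     auto accum = Variable(root, {4, 3}, DT_FLOAT);
///     auto linear = Variable(root, {4, 3}, DT_FLOAT);
///     auto grad = Const(root, {{0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}});
///     auto indices = Const(root, {0, 2});
///     auto update = SparseApplyFtrlV2(root, var, accum, linear, grad, indices,
///                                     /*lr=*/0.01f, /*l1=*/0.001f, /*l2=*/0.001f,
///                                     /*l2_shrinkage=*/0.01f, /*lr_power=*/-0.5f);
///     // Initialize the variables and run `update` as in the
///     // SparseApplyAdadelta example.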
3161 | class SparseApplyFtrlV2 { |
3162 | public: |
3163 | /// Optional attribute setters for SparseApplyFtrlV2 |
3164 | struct Attrs { |
3165 | /// If `True`, updating of the var and accum tensors will be protected |
3166 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
3167 | /// contention. |
3168 | /// |
3169 | /// Defaults to false |
3170 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
3171 | Attrs ret = *this; |
3172 | ret.use_locking_ = x; |
3173 | return ret; |
3174 | } |
3175 | |
3176 | /// Defaults to false |
3177 | TF_MUST_USE_RESULT Attrs MultiplyLinearByLr(bool x) { |
3178 | Attrs ret = *this; |
3179 | ret.multiply_linear_by_lr_ = x; |
3180 | return ret; |
3181 | } |
3182 | |
3183 | bool use_locking_ = false; |
3184 | bool multiply_linear_by_lr_ = false; |
3185 | }; |
3186 | SparseApplyFtrlV2(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
3187 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
3188 | ::tensorflow::Input grad, ::tensorflow::Input indices, |
3189 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
3190 | ::tensorflow::Input l2, ::tensorflow::Input l2_shrinkage, |
3191 | ::tensorflow::Input lr_power); |
3192 | SparseApplyFtrlV2(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
3193 | ::tensorflow::Input accum, ::tensorflow::Input linear, |
3194 | ::tensorflow::Input grad, ::tensorflow::Input indices, |
3195 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
3196 | ::tensorflow::Input l2, ::tensorflow::Input l2_shrinkage, |
3197 | ::tensorflow::Input lr_power, const SparseApplyFtrlV2::Attrs& |
3198 | attrs); |
3199 | operator ::tensorflow::Output() const { return out; } |
3200 | operator ::tensorflow::Input() const { return out; } |
3201 | ::tensorflow::Node* node() const { return out.node(); } |
3202 | |
3203 | static Attrs UseLocking(bool x) { |
3204 | return Attrs().UseLocking(x); |
3205 | } |
3206 | static Attrs MultiplyLinearByLr(bool x) { |
3207 | return Attrs().MultiplyLinearByLr(x); |
3208 | } |
3209 | |
3210 | Operation operation; |
3211 | ::tensorflow::Output out; |
3212 | }; |
3213 | |
3214 | /// Update relevant entries in '*var' and '*accum' according to the momentum scheme. |
3215 | /// |
3216 | /// Set use_nesterov = True if you want to use Nesterov momentum. |
3217 | /// |
/// That is, for rows for which we have grad, we update var and accum as follows:
3219 | /// |
3220 | /// $$accum = accum * momentum + grad$$ |
3221 | /// $$var -= lr * accum$$ |
3222 | /// |
3223 | /// Args: |
3224 | /// * scope: A Scope object |
3225 | /// * var: Should be from a Variable(). |
3226 | /// * accum: Should be from a Variable(). |
3227 | /// * lr: Learning rate. Must be a scalar. |
3228 | /// * grad: The gradient. |
3229 | /// * indices: A vector of indices into the first dimension of var and accum. |
3230 | /// * momentum: Momentum. Must be a scalar. |
3231 | /// |
3232 | /// Optional attributes (see `Attrs`): |
3233 | /// * use_locking: If `True`, updating of the var and accum tensors will be protected |
3234 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
3235 | /// contention. |
3236 | /// * use_nesterov: If `True`, the tensor passed to compute grad will be |
3237 | /// var - lr * momentum * accum, so in the end, the var you get is actually |
3238 | /// var - lr * momentum * accum. |
3239 | /// |
3240 | /// Returns: |
3241 | /// * `Output`: Same as "var". |
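///
/// Example (a minimal graph-construction sketch under the same assumptions as
/// the SparseApplyAdadelta example above; Nesterov momentum is enabled via the
/// UseNesterov attr):
///
///     Scope root = Scope::NewRootScope();
///     auto var = Variable(root, {4, 3}, DT_FLOAT);
///     auto accum = Variable(root, {4, 3}, DT_FLOAT);
///     auto grad = Const(root, {{0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}});
///     auto indices = Const(root, {0, 2});
///     auto update = SparseApplyMomentum(root, var, accum, /*lr=*/0.01f, grad,
///                                       indices, /*momentum=*/0.9f,
///                                       SparseApplyMomentum::UseNesterov(true));
///     // Initialize var and accum and run `update` as in the
///     // SparseApplyAdadelta example.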
3242 | class SparseApplyMomentum { |
3243 | public: |
3244 | /// Optional attribute setters for SparseApplyMomentum |
3245 | struct Attrs { |
3246 | /// If `True`, updating of the var and accum tensors will be protected |
3247 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
3248 | /// contention. |
3249 | /// |
3250 | /// Defaults to false |
3251 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
3252 | Attrs ret = *this; |
3253 | ret.use_locking_ = x; |
3254 | return ret; |
3255 | } |
3256 | |
3257 | /// If `True`, the tensor passed to compute grad will be |
3258 | /// var - lr * momentum * accum, so in the end, the var you get is actually |
3259 | /// var - lr * momentum * accum. |
3260 | /// |
3261 | /// Defaults to false |
3262 | TF_MUST_USE_RESULT Attrs UseNesterov(bool x) { |
3263 | Attrs ret = *this; |
3264 | ret.use_nesterov_ = x; |
3265 | return ret; |
3266 | } |
3267 | |
3268 | bool use_locking_ = false; |
3269 | bool use_nesterov_ = false; |
3270 | }; |
3271 | SparseApplyMomentum(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
3272 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
3273 | ::tensorflow::Input grad, ::tensorflow::Input indices, |
3274 | ::tensorflow::Input momentum); |
3275 | SparseApplyMomentum(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
3276 | ::tensorflow::Input accum, ::tensorflow::Input lr, |
3277 | ::tensorflow::Input grad, ::tensorflow::Input indices, |
3278 | ::tensorflow::Input momentum, const |
3279 | SparseApplyMomentum::Attrs& attrs); |
3280 | operator ::tensorflow::Output() const { return out; } |
3281 | operator ::tensorflow::Input() const { return out; } |
3282 | ::tensorflow::Node* node() const { return out.node(); } |
3283 | |
3284 | static Attrs UseLocking(bool x) { |
3285 | return Attrs().UseLocking(x); |
3286 | } |
3287 | static Attrs UseNesterov(bool x) { |
3288 | return Attrs().UseNesterov(x); |
3289 | } |
3290 | |
3291 | Operation operation; |
3292 | ::tensorflow::Output out; |
3293 | }; |
3294 | |
3295 | /// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. |
3296 | /// |
/// That is, for rows for which we have grad, we update var and accum as follows:
3298 | /// $$accum += grad * grad$$ |
3299 | /// $$prox_v = var$$ |
3300 | /// $$prox_v -= lr * grad * (1 / sqrt(accum))$$ |
3301 | /// $$var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}$$ |
3302 | /// |
3303 | /// Args: |
3304 | /// * scope: A Scope object |
3305 | /// * var: Should be from a Variable(). |
3306 | /// * accum: Should be from a Variable(). |
3307 | /// * lr: Learning rate. Must be a scalar. |
3308 | /// * l1: L1 regularization. Must be a scalar. |
3309 | /// * l2: L2 regularization. Must be a scalar. |
3310 | /// * grad: The gradient. |
3311 | /// * indices: A vector of indices into the first dimension of var and accum. |
3312 | /// |
3313 | /// Optional attributes (see `Attrs`): |
3314 | /// * use_locking: If True, updating of the var and accum tensors will be protected by |
3315 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
3316 | /// |
3317 | /// Returns: |
3318 | /// * `Output`: Same as "var". |
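///
/// Example (a minimal graph-construction sketch under the same assumptions as
/// the SparseApplyAdadelta example above):
///
///     Scope root = Scope::NewRootScope();
///     auto var = Variable(root, {4, 3}, DT_FLOAT);
///     auto accum = Variable(root, {4, 3}, DT_FLOAT);
///     auto grad = Const(root, {{0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}});
///     auto indices = Const(root, {0, 2});
///     auto update = SparseApplyProximalAdagrad(root, var, accum, /*lr=*/0.01f,
///                                              /*l1=*/0.001f, /*l2=*/0.001f,
///                                              grad, indices);
///     // Initialize var and accum and run `update` as in the
///     // SparseApplyAdadelta example.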
3319 | class SparseApplyProximalAdagrad { |
3320 | public: |
3321 | /// Optional attribute setters for SparseApplyProximalAdagrad |
3322 | struct Attrs { |
3323 | /// If True, updating of the var and accum tensors will be protected by |
3324 | /// a lock; otherwise the behavior is undefined, but may exhibit less contention. |
3325 | /// |
3326 | /// Defaults to false |
3327 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
3328 | Attrs ret = *this; |
3329 | ret.use_locking_ = x; |
3330 | return ret; |
3331 | } |
3332 | |
3333 | bool use_locking_ = false; |
3334 | }; |
3335 | SparseApplyProximalAdagrad(const ::tensorflow::Scope& scope, |
3336 | ::tensorflow::Input var, ::tensorflow::Input accum, |
3337 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
3338 | ::tensorflow::Input l2, ::tensorflow::Input grad, |
3339 | ::tensorflow::Input indices); |
3340 | SparseApplyProximalAdagrad(const ::tensorflow::Scope& scope, |
3341 | ::tensorflow::Input var, ::tensorflow::Input accum, |
3342 | ::tensorflow::Input lr, ::tensorflow::Input l1, |
3343 | ::tensorflow::Input l2, ::tensorflow::Input grad, |
3344 | ::tensorflow::Input indices, const |
3345 | SparseApplyProximalAdagrad::Attrs& attrs); |
3346 | operator ::tensorflow::Output() const { return out; } |
3347 | operator ::tensorflow::Input() const { return out; } |
3348 | ::tensorflow::Node* node() const { return out.node(); } |
3349 | |
3350 | static Attrs UseLocking(bool x) { |
3351 | return Attrs().UseLocking(x); |
3352 | } |
3353 | |
3354 | Operation operation; |
3355 | ::tensorflow::Output out; |
3356 | }; |
3357 | |
3358 | /// Sparse update '*var' as FOBOS algorithm with fixed learning rate. |
3359 | /// |
/// That is, for rows for which we have grad, we update var as follows:
3361 | /// $$prox_v = var - alpha * grad$$ |
3362 | /// $$var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}$$ |
3363 | /// |
3364 | /// Args: |
3365 | /// * scope: A Scope object |
3366 | /// * var: Should be from a Variable(). |
3367 | /// * alpha: Scaling factor. Must be a scalar. |
3368 | /// * l1: L1 regularization. Must be a scalar. |
3369 | /// * l2: L2 regularization. Must be a scalar. |
3370 | /// * grad: The gradient. |
/// * indices: A vector of indices into the first dimension of var.
3372 | /// |
3373 | /// Optional attributes (see `Attrs`): |
3374 | /// * use_locking: If True, the subtraction will be protected by a lock; |
3375 | /// otherwise the behavior is undefined, but may exhibit less contention. |
3376 | /// |
3377 | /// Returns: |
3378 | /// * `Output`: Same as "var". |
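///
/// Example (a minimal graph-construction sketch under the same assumptions as
/// the SparseApplyAdadelta example above; no accumulator slot is needed):
///
///     Scope root = Scope::NewRootScope();
///     auto var = Variable(root, {4, 3}, DT_FLOAT);
///     auto grad = Const(root, {{0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}});
///     auto indices = Const(root, {0, 2});
///     auto update = SparseApplyProximalGradientDescent(root, var,
///                                                      /*alpha=*/0.01f,
///                                                      /*l1=*/0.001f,
///                                                      /*l2=*/0.001f,
///                                                      grad, indices);
///     // Initialize var and run `update` as in the SparseApplyAdadelta example.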
3379 | class SparseApplyProximalGradientDescent { |
3380 | public: |
3381 | /// Optional attribute setters for SparseApplyProximalGradientDescent |
3382 | struct Attrs { |
3383 | /// If True, the subtraction will be protected by a lock; |
3384 | /// otherwise the behavior is undefined, but may exhibit less contention. |
3385 | /// |
3386 | /// Defaults to false |
3387 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
3388 | Attrs ret = *this; |
3389 | ret.use_locking_ = x; |
3390 | return ret; |
3391 | } |
3392 | |
3393 | bool use_locking_ = false; |
3394 | }; |
3395 | SparseApplyProximalGradientDescent(const ::tensorflow::Scope& scope, |
3396 | ::tensorflow::Input var, ::tensorflow::Input |
3397 | alpha, ::tensorflow::Input l1, |
3398 | ::tensorflow::Input l2, ::tensorflow::Input |
3399 | grad, ::tensorflow::Input indices); |
3400 | SparseApplyProximalGradientDescent(const ::tensorflow::Scope& scope, |
3401 | ::tensorflow::Input var, ::tensorflow::Input |
3402 | alpha, ::tensorflow::Input l1, |
3403 | ::tensorflow::Input l2, ::tensorflow::Input |
3404 | grad, ::tensorflow::Input indices, const |
3405 | SparseApplyProximalGradientDescent::Attrs& |
3406 | attrs); |
3407 | operator ::tensorflow::Output() const { return out; } |
3408 | operator ::tensorflow::Input() const { return out; } |
3409 | ::tensorflow::Node* node() const { return out.node(); } |
3410 | |
3411 | static Attrs UseLocking(bool x) { |
3412 | return Attrs().UseLocking(x); |
3413 | } |
3414 | |
3415 | Operation operation; |
3416 | ::tensorflow::Output out; |
3417 | }; |
3418 | |
3419 | /// Update '*var' according to the RMSProp algorithm. |
3420 | /// |
/// Note that in the dense implementation of this algorithm, ms and mom will
/// update even if the grad is zero, but in this sparse implementation, ms
/// and mom will not update in iterations during which the grad is zero.
3424 | /// |
3425 | /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 |
3426 | /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) |
3427 | /// |
3428 | /// $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ |
3429 | /// $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ |
3430 | /// $$var <- var - mom$$ |
3431 | /// |
3432 | /// Args: |
3433 | /// * scope: A Scope object |
3434 | /// * var: Should be from a Variable(). |
3435 | /// * ms: Should be from a Variable(). |
3436 | /// * mom: Should be from a Variable(). |
3437 | /// * lr: Scaling factor. Must be a scalar. |
3438 | /// * rho: Decay rate. Must be a scalar. |
3439 | /// * epsilon: Ridge term. Must be a scalar. |
3440 | /// * grad: The gradient. |
3441 | /// * indices: A vector of indices into the first dimension of var, ms and mom. |
3442 | /// |
3443 | /// Optional attributes (see `Attrs`): |
3444 | /// * use_locking: If `True`, updating of the var, ms, and mom tensors is protected |
3445 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
3446 | /// contention. |
3447 | /// |
3448 | /// Returns: |
3449 | /// * `Output`: Same as "var". |
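///
/// Example (a minimal sketch under the same assumptions as the
/// SparseApplyAdadelta example above; the updated var is fetched back through
/// the op's output):
///
///     Scope root = Scope::NewRootScope();
///     auto var = Variable(root, {4, 3}, DT_FLOAT);
///     auto ms  = Variable(root, {4, 3}, DT_FLOAT);
///     auto mom = Variable(root, {4, 3}, DT_FLOAT);
///     auto init_var = Assign(root, var, Const(root, 1.0f, {4, 3}));
///     auto init_ms  = Assign(root, ms,  Const(root, 0.0f, {4, 3}));
///     auto init_mom = Assign(root, mom, Const(root, 0.0f, {4, 3}));
///     auto grad = Const(root, {{0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}});
///     auto indices = Const(root, {0, 2});
///     auto update = SparseApplyRMSProp(root, var, ms, mom, /*lr=*/0.01f,
///                                      /*rho=*/0.9f, /*momentum=*/0.0f,
///                                      /*epsilon=*/1e-7f, grad, indices);
///     ClientSession session(root);
///     TF_CHECK_OK(session.Run({init_var, init_ms, init_mom}, nullptr));
///     std::vector<Tensor> out;
///     TF_CHECK_OK(session.Run({update}, &out));  // out[0] is the updated var.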
3450 | class SparseApplyRMSProp { |
3451 | public: |
3452 | /// Optional attribute setters for SparseApplyRMSProp |
3453 | struct Attrs { |
3454 | /// If `True`, updating of the var, ms, and mom tensors is protected |
3455 | /// by a lock; otherwise the behavior is undefined, but may exhibit less |
3456 | /// contention. |
3457 | /// |
3458 | /// Defaults to false |
3459 | TF_MUST_USE_RESULT Attrs UseLocking(bool x) { |
3460 | Attrs ret = *this; |
3461 | ret.use_locking_ = x; |
3462 | return ret; |
3463 | } |
3464 | |
3465 | bool use_locking_ = false; |
3466 | }; |
3467 | SparseApplyRMSProp(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
3468 | ::tensorflow::Input ms, ::tensorflow::Input mom, |
3469 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
3470 | ::tensorflow::Input momentum, ::tensorflow::Input epsilon, |
3471 | ::tensorflow::Input grad, ::tensorflow::Input indices); |
3472 | SparseApplyRMSProp(const ::tensorflow::Scope& scope, ::tensorflow::Input var, |
3473 | ::tensorflow::Input ms, ::tensorflow::Input mom, |
3474 | ::tensorflow::Input lr, ::tensorflow::Input rho, |
3475 | ::tensorflow::Input momentum, ::tensorflow::Input epsilon, |
3476 | ::tensorflow::Input grad, ::tensorflow::Input indices, const |
3477 | SparseApplyRMSProp::Attrs& attrs); |
3478 | operator ::tensorflow::Output() const { return out; } |
3479 | operator ::tensorflow::Input() const { return out; } |
3480 | ::tensorflow::Node* node() const { return out.node(); } |
3481 | |
3482 | static Attrs UseLocking(bool x) { |
3483 | return Attrs().UseLocking(x); |
3484 | } |
3485 | |
3486 | Operation operation; |
3487 | ::tensorflow::Output out; |
3488 | }; |
3489 | |
3490 | /// @} |
3491 | |
3492 | } // namespace ops |
3493 | } // namespace tensorflow |
3494 | |
3495 | #endif // TENSORFLOW_CC_OPS_TRAINING_OPS_H_ |
3496 | |