/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_FRAMEWORK_RESOURCE_VAR_H_
#define TENSORFLOW_CORE_FRAMEWORK_RESOURCE_VAR_H_

#include <atomic>

#include "tensorflow/core/framework/resource_base.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"

namespace tensorflow {

// Forward declarations to avoid introducing a dependency on headers in
// "tensorflow/core/graph/...".
class GraphDefBuilder;
class Node;

// Resource stored by variables in the resource manager (new, resource-style
// version).
//
// These variables have a mixed access mode: they can operate in copy-on-write
// mode (the default) or copy-on-read mode (used only for sparse access).
//
// When copy-on-write mode is enabled, reading the value of the variable
// involves grabbing its mutex in shared mode and aliasing the internal tensor
// as the output of the read operation, which increases its reference count.
// Writing, conversely, works under an exclusive lock: using the reference
// count, we detect whether any outstanding aliases of the tensor exist, copy
// the tensor if they do, and then write to whichever buffer (the original or
// the copy) has no outstanding aliases. Sparse operations are not supported in
// copy-on-write mode.
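//
// As a rough sketch (schematic only; real kernels route output allocation and
// copies through OpKernelContext helpers), a dense read in copy-on-write mode
// looks like:
//
//   tf_shared_lock ml(*var->mu());
//   Tensor alias = *var->tensor();  // shares the buffer, bumps its refcount
//   // `alias` can be handed out as the read result; the reference count
//   // keeps the data alive even after the lock is released.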
//
// When a variable is accessed sparsely it switches to copy-on-read mode. To
// switch we need to grab an exclusive lock and might (if there are aliases)
// need to copy the entire tensor. Once copy-on-read mode is enabled, no tensor
// is allowed to alias the variable's internal tensor. This means dense reads
// must return a copy of the variable, done while holding a shared lock. Dense
// writes do not need to check whether aliases exist, and can always write
// directly to the buffer without making a copy, while holding an exclusive
// lock. Sparse reads and sparse writes, on the other hand, can be done under a
// shared or exclusive mutex (the damage from writes under a shared mutex is
// limited since no other buffer is allowed to alias the variable's buffer).
// Using an exclusive mutex disallows concurrent writes and concurrent sparse
// reads, providing some extra safety at the expense of performance, while a
// shared mutex allows for "hogwild" behavior. Doing sparse writes under a
// shared mutex prevents them from overlapping with dense writes, which is
// necessary as dense writes can change the shape of the tensor.
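//
// In copy-on-read mode a dense read therefore has to materialize a copy while
// the shared lock is held. A minimal sketch (the actual element-wise copy is
// done with a device-appropriate copy, elided here):
//
//   tf_shared_lock ml(*var->mu());
//   Tensor copy(var->tensor()->dtype(), var->tensor()->shape());
//   // ... deep-copy var->tensor() into `copy`; handing out an alias of the
//   // internal buffer would violate copy-on-read mode.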
//
// Transitioning a variable from copy-on-read mode back to copy-on-write mode
// is currently not supported. To upgrade a variable from copy-on-write to
// copy-on-read mode use `EnsureSparseVariableAccess()`, and then grab the
// variable's mutex as desired. To access the variable in dense mode, grab the
// mutex either directly or via `MaybeLockVariableInputMutexesInOrder` on all
// variables being modified, and then call `PrepareToUpdateVariable` on them in
// any order.
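//
// Schematically, a dense-update kernel would use those helpers as below (their
// exact signatures live in tensorflow/core/kernels/training_op_helpers.h and
// are elided here), while a sparse-update kernel would first call
// `EnsureSparseVariableAccess()` to switch the variable to copy-on-read mode
// before taking a shared (or exclusive) lock:
//
//   auto locks = MaybeLockVariableInputMutexesInOrder(/*...*/);
//   PrepareToUpdateVariable(/*...*/);  // copies the tensor if it is aliased
//   // ... write to var->tensor() ...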
class Var : public ResourceBase {
 public:
  explicit Var(DataType dtype) : tensor_(dtype) {}

  // When locking multiple variables, the locks must be acquired in order of
  // increasing mu() address.
  // TODO(ebrevdo): Use LockSet instead of exposing mu.
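  //
  // For example (hypothetical variables `a` and `b` being updated together),
  // the two locks would be taken in increasing address order, skipping the
  // second lock when both handles refer to the same variable:
  //
  //   mutex* first = a->mu() < b->mu() ? a->mu() : b->mu();
  //   mutex* second = a->mu() < b->mu() ? b->mu() : a->mu();
  //   mutex_lock lock_first(*first);
  //   // ... also lock `second`, unless it is the same mutex as `first` ...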
  mutex* mu() { return &mu_; }
  Tensor* tensor() { return &tensor_; }

  // Uninitializes the variable by reverting the tensor to the state it had
  // when the variable was first created.
  void Uninitialize() {
    // Moving tensor_ into a local frees its buffer once `unused` goes out of
    // scope.
    Tensor unused = std::move(tensor_);
    is_initialized = false;
  }

  Status AsGraphDef(GraphDefBuilder* builder, Node** out) const override;

  std::string DebugString() const override {
    return strings::StrCat(DataTypeString(tensor_.dtype()), "/",
                           tensor_.shape().DebugString());
  }

  // Only used in the resource variable path. In resource variables,
  // tensor.IsInitialized() can be true (i.e. the tensor has memory allocated
  // to it) while the tensor does not yet hold a usable value, due to a race
  // condition, and it is possible to stumble upon this during
  // variable.initialized_value(). So it is best to just store directly whether
  // the variable is initialized.
  bool is_initialized = false;  // TF_GUARDED_BY(mu_) but annotalysis doesn't
                                // like it.

  // Also fake-guarded by mu_. Should be set to true whenever any sparse
  // operation uses the variable. Once this is true, no tensor is allowed to
  // alias the memory of the variable, and we always copy the variable on
  // reads. This allows sparse operations to happen with only a shared lock if
  // so desired.
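  //
  // For example (schematic), a dense read would branch on this flag:
  //
  //   if (var->copy_on_read_mode.load()) {
  //     // Copy-on-read: deep-copy var->tensor() into the output.
  //   } else {
  //     // Copy-on-write: alias var->tensor() as the output.
  //   }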
  std::atomic<bool> copy_on_read_mode{false};

 private:
  mutex mu_;
  Tensor tensor_;

  ~Var() override {}
  TF_DISALLOW_COPY_AND_ASSIGN(Var);
};

// Locks the variable's mutex on construction, then unlocks it and unrefs the
// variable automatically when going out of scope; also supports early manual
// release via Release().
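//
// A usage sketch (how `var` is obtained and the float dtype are illustrative;
// any code path that yields a Ref()-ed Var* works the same way):
//
//   Var* var = ...;                    // e.g. looked up from a ResourceMgr
//   ScopedUnlockUnrefVar locked(var);  // takes var->mu() in the constructor
//   var->tensor()->flat<float>()(0) = 1.0f;
//   // The mutex is unlocked and the reference dropped at scope exit, or
//   // earlier via locked.Release().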
class TF_SCOPED_LOCKABLE ScopedUnlockUnrefVar {
 public:
  explicit ScopedUnlockUnrefVar(Var* var) TF_EXCLUSIVE_LOCK_FUNCTION(var_->mu())
      : var_(var) {
    if (var_) {
      var_->mu()->lock();
    }
  }
  void Release() TF_UNLOCK_FUNCTION() {
    if (var_) {
      var_->mu()->unlock();
      var_->Unref();
      var_ = nullptr;
    }
  }
  ~ScopedUnlockUnrefVar() TF_UNLOCK_FUNCTION() { Release(); }

 private:
  Var* var_;

  ScopedUnlockUnrefVar(const ScopedUnlockUnrefVar&) = delete;
  ScopedUnlockUnrefVar(ScopedUnlockUnrefVar&&) = delete;
  ScopedUnlockUnrefVar& operator=(const ScopedUnlockUnrefVar&) = delete;
  ScopedUnlockUnrefVar& operator=(ScopedUnlockUnrefVar&&) = delete;
};

}  // end namespace tensorflow

#endif  // TENSORFLOW_CORE_FRAMEWORK_RESOURCE_VAR_H_