/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_SCOPED_ALLOCATOR_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_SCOPED_ALLOCATOR_H_

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/platform/mutex.h"

namespace tensorflow {
class ScopedAllocatorContainer;
class ScopedAllocatorInstance;

// Manages a single backing tensor and a collection of aliases.
class ScopedAllocator {
 public:
  static constexpr int32_t kInvalidId = 0;
  static constexpr size_t kMaxAlignment = 64;

  // A subrange of the TensorBuffer associated with this object that
  // will be the backing memory for one aliased tensor.
  struct Field {
    int32_t scope_id;
    size_t offset;
    size_t bytes_requested;
    size_t bytes_allocated;
  };
  // Field index that refers to the backing tensor, not any aliased field.
  static constexpr int32_t kBackingIndex = -1;
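  //
  // Example layout (illustrative only; how fields are actually padded is up
  // to the code that populates them): a 192-byte backing tensor could back
  // two aliased tensors with
  //   fields[0] = {scope_id: 2, offset: 0,  bytes_requested: 40,  bytes_allocated: 64}
  //   fields[1] = {scope_id: 3, offset: 64, bytes_requested: 100, bytes_allocated: 128}
  // where bytes_allocated is bytes_requested rounded up so that every field
  // starts on a kMaxAlignment boundary.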

  // backing_tensor is expected to be newly allocated by a ScopedAllocatorOp
  // instance. It must be large enough to back all of the specified
  // (offset, byte) ranges of the fields.
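  // Equivalently (an illustrative restatement, not necessarily a literal
  // check performed here): for every Field f in fields,
  //   backing_tensor.TotalBytes() >= f.offset + f.bytes_requested
  // must hold, or the aliased tensor for f would extend past the backing
  // buffer.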
  ScopedAllocator(const Tensor& backing_tensor, int32_t scope_id,
                  const std::string& name, const gtl::ArraySlice<Field> fields,
                  int32_t expected_call_count,
                  ScopedAllocatorContainer* container);

  // Automatically deleted when the last use expires, or when the
  // ScopedAllocatorContainer decides to delete it.
  ~ScopedAllocator() TF_LOCKS_EXCLUDED(mu_);

  // For debugging: returns true iff p is a pointer that could have
  // been returned by AllocateRaw.
  bool VerifyPointer(const void* p);
  bool VerifyTensor(const Tensor* t);

  const Tensor& tensor() const { return backing_tensor_; }

  const std::string& name() const { return name_; }

 private:
  friend class ScopedAllocatorInstance;
  // Only ScopedAllocatorInstances can call AllocateRaw and DeallocateRaw on a
  // ScopedAllocator.
  void* AllocateRaw(int32_t field_index, size_t num_bytes)
      TF_LOCKS_EXCLUDED(mu_);
  void DeallocateRaw(void* p) TF_LOCKS_EXCLUDED(mu_);
  Tensor backing_tensor_;
  TensorBuffer* tbuf_;
  int32_t id_;
  std::string name_;
  ScopedAllocatorContainer* container_;
  std::vector<Field> fields_;
  mutex mu_;
  int32_t expected_call_count_ TF_GUARDED_BY(mu_);
  int32_t live_alloc_count_ TF_GUARDED_BY(mu_);
};

// An Allocator that returns a pointer into the backing buffer of a
// previously allocated tensor, allowing creation of an alias tensor.
// There is a one-to-one mapping between the fields of a ScopedAllocator
// and ScopedAllocatorInstances, and likewise between scope_ids and
// ScopedAllocatorInstances. An instance is intended for exactly one
// allocation and should be discarded immediately after that single use.
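//
// Illustrative lifecycle (a sketch; local names such as "sa" and "num_bytes"
// and the exact call sites are assumptions, not part of this header):
//
//   // One instance is created per Field of the ScopedAllocator "sa".
//   auto* inst = new ScopedAllocatorInstance(sa, /*field_index=*/0);
//   // The single allocation returns a pointer into sa's backing tensor at
//   // the field's offset.
//   void* ptr = inst->AllocateRaw(ScopedAllocator::kMaxAlignment, num_bytes);
//   ...
//   inst->DeallocateRaw(ptr);
//   // Once the instance has been deallocated and dropped from its
//   // container's table, it deletes itself; its destructor is private, so
//   // it is never deleted directly.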
class ScopedAllocatorInstance : public Allocator {
 public:
  explicit ScopedAllocatorInstance(ScopedAllocator* sa, int32_t field_index);

 private:
  ~ScopedAllocatorInstance() override {
    VLOG(1) << "~ScopedAllocatorInstance " << this;
  }

 public:
  // When a ScopedAllocatorContainer "Drops" a scope_id, it calls DropFromTable
  // on the underlying ScopedAllocatorInstance. If this instance has already
  // deallocated the tensor slice, we can safely delete this.
  void DropFromTable() TF_LOCKS_EXCLUDED(mu_);
  void* AllocateRaw(size_t alignment, size_t num_bytes)
      TF_LOCKS_EXCLUDED(mu_) override;
  void* AllocateRaw(size_t alignment, size_t num_bytes,
                    const AllocationAttributes& allocator_attr) override {
    return AllocateRaw(alignment, num_bytes);
  }
  void DeallocateRaw(void* p) TF_LOCKS_EXCLUDED(mu_) override;
  bool TracksAllocationSizes() const override { return false; }
  size_t RequestedSize(const void* ptr) const override { return 0; }
  size_t AllocatedSize(const void* ptr) const override { return 0; }
  int64_t AllocationId(const void* ptr) const override { return 0; }
  size_t AllocatedSizeSlow(const void* ptr) const override { return 0; }
  std::string Name() override;

 private:
  mutex mu_;
  ScopedAllocator* scoped_allocator_;
  int32_t field_index_;
  bool allocated_ TF_GUARDED_BY(mu_);
  bool deallocated_ TF_GUARDED_BY(mu_);
  bool in_table_ TF_GUARDED_BY(mu_);
};

}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_COMMON_RUNTIME_SCOPED_ALLOCATOR_H_