/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/c/tf_tensor.h"

#include <algorithm>
#include <cstring>
#include <memory>
#include <vector>

#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/coding.h"
#include "tensorflow/core/platform/casts.h"

using tensorflow::Status;
using tensorflow::Tensor;
using tensorflow::TensorBuffer;
using tensorflow::errors::FailedPrecondition;
using tensorflow::errors::InvalidArgument;

namespace tensorflow {
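// Allocates a `len`-byte buffer for tensor data from `allocator`, aligned to
// EIGEN_MAX_ALIGN_BYTES, recording the allocation against `operation` when
// memory logging is enabled.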
void* allocate_tensor(const char* operation, size_t len, Allocator* allocator) {
  void* data = allocator->AllocateRaw(EIGEN_MAX_ALIGN_BYTES, len);
  if (LogMemory::IsEnabled() && data != nullptr) {
    LogMemory::RecordRawAllocation(
        operation, LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, len, data,
        allocator);
  }
  return data;
}

void* allocate_tensor(const char* operation, size_t len) {
  return allocate_tensor(operation, len, cpu_allocator());
}

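// Releases a buffer produced by allocate_tensor(). `arg` is interpreted as the
// Allocator* that made the allocation; when it is null, the CPU allocator is
// used. The signature matches the deallocator callback taken by TF_NewTensor.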
void deallocate_buffer(void* data, size_t len, void* arg) {
  Allocator* allocator = nullptr;
  if (arg == nullptr) {
    allocator = cpu_allocator();
  } else {
    allocator = reinterpret_cast<Allocator*>(arg);
  }
  if (LogMemory::IsEnabled() && data != nullptr) {
    LogMemory::RecordRawDeallocation(
        "TensorFlow C API", LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, data,
        allocator, false);
  }
  allocator->DeallocateRaw(data);
}
}  // namespace tensorflow

namespace {
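// Wraps `buf` in a TF_Tensor with the given dtype and shape, consuming one
// reference on `buf`. Returns nullptr when `len` is too small to hold the
// requested number of elements of a fixed-size dtype.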
TF_Tensor* CreateTensor(TF_ManagedBuffer* buf, TF_DataType dtype,
                        const int64_t* dims, int num_dims, size_t len) {
  std::vector<int64_t> dimvec(num_dims);
  for (int i = 0; i < num_dims; ++i) {
    dimvec[i] = static_cast<int64_t>(dims[i]);
  }

  // TODO(gjn): Make the choice of interface a compile-time configuration.
  tensorflow::TensorInterface ret(
      Tensor(static_cast<tensorflow::DataType>(dtype),
             tensorflow::TensorShape(dimvec), buf));
  buf->Unref();
  size_t elem_size = TF_DataTypeSize(dtype);
  if (elem_size > 0 && len < (elem_size * ret.NumElements())) {
    return nullptr;
  }
  return new TF_Tensor{new tensorflow::TensorInterface(ret)};
}
}  // namespace

TF_Tensor* TF_AllocateTensor(TF_DataType dtype, const int64_t* dims,
                             int num_dims, size_t len) {
  void* data = tensorflow::allocate_tensor("TF_AllocateTensor", len,
                                           tensorflow::cpu_allocator());
  TF_ManagedBuffer* buf =
      new TF_ManagedBuffer(data, len, tensorflow::deallocate_buffer,
                           tensorflow::cpu_allocator(), /*owns_memory=*/true);
  return CreateTensor(buf, dtype, dims, num_dims, len);
}
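
// Illustrative usage (editorial sketch, not part of the original file):
// allocate a 2x3 float tensor and fill its buffer through TF_TensorData.
//
//   int64_t dims[] = {2, 3};
//   TF_Tensor* t =
//       TF_AllocateTensor(TF_FLOAT, dims, 2, 6 * TF_DataTypeSize(TF_FLOAT));
//   float* values = static_cast<float*>(TF_TensorData(t));
//   for (int i = 0; i < 6; ++i) values[i] = 0.0f;
//   TF_DeleteTensor(t);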

TF_Tensor* TF_NewTensor(TF_DataType dtype, const int64_t* dims, int num_dims,
                        void* data, size_t len,
                        void (*deallocator)(void* data, size_t len, void* arg),
                        void* deallocator_arg) {
  TF_ManagedBuffer* buf = nullptr;
  if (dtype != TF_STRING && dtype != TF_RESOURCE &&
      tensorflow::DataTypeCanUseMemcpy(
          static_cast<tensorflow::DataType>(dtype)) &&
      reinterpret_cast<intptr_t>(data) % std::max(1, EIGEN_MAX_ALIGN_BYTES) !=
          0) {
    // TF_STRING and TF_RESOURCE tensors have a different representation in
    // TF_Tensor than they do in tensorflow::Tensor. So a copy here is a waste
    // (any alignment requirements will be taken care of by TF_TensorToTensor
    // and TF_TensorFromTensor).
    //
    // Other types have the same representation, so copy only if it is safe to
    // do so.
    buf = new TF_ManagedBuffer(tensorflow::allocate_tensor("TF_NewTensor", len),
                               len, tensorflow::deallocate_buffer, nullptr,
                               /*owns_memory=*/true);
    std::memcpy(buf->data(), data, len);
    // Free the original buffer.
    deallocator(data, len, deallocator_arg);
  } else {
    buf = new TF_ManagedBuffer(data, len, deallocator, deallocator_arg,
                               /*owns_memory=*/false);
  }

  return CreateTensor(buf, dtype, dims, num_dims, len);
}
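
// Illustrative usage (editorial sketch, not part of the original file): wrap a
// caller-owned heap buffer. Note that if `values` is insufficiently aligned,
// TF_NewTensor copies the data and runs the deallocator immediately, as
// implemented above.
//
//   int64_t dims[] = {2};
//   float* values = new float[2]{1.0f, 2.0f};
//   TF_Tensor* t = TF_NewTensor(
//       TF_FLOAT, dims, 1, values, 2 * sizeof(float),
//       [](void* data, size_t, void*) { delete[] static_cast<float*>(data); },
//       nullptr);
//   TF_DeleteTensor(t);  // Invokes the deallocator if it has not run yet.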
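// Returns `t` when its buffer is uniquely referenced and owns its memory (so
// the tensor may be moved), and nullptr otherwise.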
TF_Tensor* TF_TensorMaybeMove(TF_Tensor* t) {
  return t->tensor->CanMove() ? t : nullptr;
}

void TF_DeleteTensor(TF_Tensor* t) {
  if (t == nullptr) {
    return;
  }

  if (t->tensor) {
    t->tensor->Release();
  }

  delete t;
}

TF_DataType TF_TensorType(const TF_Tensor* t) {
  return static_cast<TF_DataType>(t->tensor->Type());
}

void TF_SetShape(TF_Tensor* t, const int64_t* dims, int num_dims) {
  tensorflow::down_cast<tensorflow::TensorInterface*>(t->tensor)->SetShape(
      dims, num_dims);
}

int TF_NumDims(const TF_Tensor* t) { return t->tensor->NumDims(); }

int64_t TF_Dim(const TF_Tensor* t, int dim_index) {
  return t->tensor->Dim(dim_index);
}

size_t TF_TensorByteSize(const TF_Tensor* t) { return t->tensor->ByteSize(); }

void* TF_TensorData(const TF_Tensor* t) { return t->tensor->Data(); }

int64_t TF_TensorElementCount(const TF_Tensor* t) {
  int64_t result = 1;
  int rank = TF_NumDims(t);
  for (int dim = 0; dim < rank; ++dim) {
    result *= TF_Dim(t, dim);
  }
  return result;
}

void TF_TensorBitcastFrom(const TF_Tensor* from, TF_DataType type,
                          TF_Tensor* to, const int64_t* new_dims,
                          int num_new_dims, TF_Status* status) {
  TF_SetStatus(status, TF_OK, "");
  Status cc_status(
      tensorflow::down_cast<tensorflow::TensorInterface*>(to->tensor)
          ->BitcastFrom(
              *tensorflow::down_cast<const tensorflow::TensorInterface*>(
                  from->tensor),
              static_cast<tensorflow::DataType>(type), new_dims, num_new_dims));
  Set_TF_Status_from_Status(status, cc_status);
}
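
// Illustrative usage (editorial sketch, not part of the original file):
// reinterpret an existing 2x2 TF_FLOAT tensor `from` as a flat [4] view in
// `to`, without copying the underlying buffer.
//
//   int64_t new_dims[] = {4};
//   TF_TensorBitcastFrom(from, TF_FLOAT, to, new_dims, 1, status);
//   if (TF_GetCode(status) == TF_OK) {
//     // `to` now aliases `from`'s buffer with the new shape.
//   }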

namespace tensorflow {

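// Destroys this TensorInterface. For DT_STRING tensors whose buffer is
// uniquely owned, the individual TF_TString elements are deallocated first,
// because the strings manage their own character storage.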
void TensorInterface::Release() {
  if (Type() == DT_STRING && NumElements() > 0) {
    TF_TString* data = static_cast<TF_TString*>(Data());
    if (CanMove() && data != nullptr) {
      for (int64_t i = 0; i < NumElements(); ++i) {
        TF_TString_Dealloc(&data[i]);
      }
    }
  }
  delete this;
}

bool TensorInterface::CanMove() const {
  // It is safe to move the Tensor if and only if we own the unique reference to
  // it. In that case, we might as well not delete and reallocate, but a future
  // implementation might need to do so.
  TensorBuffer* buf = tensorflow::TensorCApi::Buffer(tensor_);
  return buf->RefCountIsOne() && buf->root_buffer()->RefCountIsOne() &&
         buf->OwnsMemory();
}

std::string TensorInterface::SummarizeValue() const {
  return tensor_.SummarizeValue(/*max_entries=*/3, /*print_v2=*/true);
}

DataType TensorInterface::Type() const { return tensor_.dtype(); }

int TensorInterface::NumDims() const { return tensor_.dims(); }

int64_t TensorInterface::Dim(int dim_index) const {
  return static_cast<int64_t>(tensor_.dim_size(dim_index));
}

int64_t TensorInterface::NumElements() const {
  return static_cast<int64_t>(tensor_.NumElements());
}

size_t TensorInterface::ByteSize() const {
  return tensorflow::TensorCApi::Buffer(tensor_)->size();
}

void* TensorInterface::Data() const {
  return tensorflow::TensorCApi::Buffer(tensor_)->data();
}

void TensorInterface::SetShape(const int64_t* dims, int num_dims) {
  tensorflow::TensorShape s;
  for (int i = 0; i < num_dims; ++i) {
    s.AddDim(dims[i]);
  }
  tensor_.set_shape(s);
}

Status TensorInterface::BitcastFrom(const TensorInterface& from, DataType type,
                                    const int64_t* new_dims, int num_new_dims) {
  tensorflow::TensorShape s;
  for (int i = 0; i < num_new_dims; ++i) {
    s.AddDim(new_dims[i]);
  }
  return tensor_.BitcastFrom(from.tensor_, type, s);
}

Status TensorInterface::FromProto(const tensorflow::TensorProto& from) {
  bool success = tensor_.FromProto(from);
  if (success) return OkStatus();
  return errors::InvalidArgument("Unparseable tensor proto");
}

}  // namespace tensorflow

// --------------------------------------------------------------------------

static void DeleteArray(void* data, size_t size, void* arg) {
  DCHECK_EQ(data, arg);
  delete[] reinterpret_cast<char*>(arg);
}

// Create an empty tensor of type 'dtype'. 'shape' can be arbitrary, but has to
// result in a zero-sized tensor.
static TF_Tensor* EmptyTensor(TF_DataType dtype,
                              const tensorflow::TensorShape& shape) {
  static char empty;
  int64_t nelems = 1;
  std::vector<int64_t> dims;
  auto shape_dims = shape.dims();
  dims.reserve(shape_dims);
  for (int i = 0; i < shape_dims; ++i) {
    dims.push_back(shape.dim_size(i));
    nelems *= shape.dim_size(i);
  }
  CHECK_EQ(nelems, 0);
  return TF_NewTensor(
      dtype, dims.data(), shape.dims(), reinterpret_cast<void*>(&empty), 0,
      [](void*, size_t, void*) {}, nullptr);
}

namespace tensorflow {

// Non-static for testing.
TF_Tensor* TF_TensorFromTensor(const tensorflow::Tensor& src, Status* status) {
  *status = OkStatus();
  if (!src.IsInitialized()) {
    *status = FailedPrecondition(
        "attempt to use a tensor with an uninitialized value");
    return nullptr;
  }
  if (src.NumElements() == 0) {
    return EmptyTensor(static_cast<TF_DataType>(src.dtype()), src.shape());
  }

  Tensor tensor;
  if (!tensor.CopyFrom(src, src.shape())) {
    *status = InvalidArgument("Tensor::CopyFrom failed");
    return nullptr;
  }
  return new TF_Tensor{new tensorflow::TensorInterface(std::move(tensor))};
}

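// A variant of TF_TensorFromTensor that wraps `src` directly instead of going
// through Tensor::CopyFrom.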
TF_Tensor* TF_TensorFromTensorShallow(const tensorflow::Tensor& src,
                                      Status* status) {
  *status = OkStatus();
  if (!src.IsInitialized()) {
    *status = FailedPrecondition(
        "attempt to use a tensor with an uninitialized value");
    return nullptr;
  }
  if (src.NumElements() == 0) {
    return EmptyTensor(static_cast<TF_DataType>(src.dtype()), src.shape());
  }
  return new TF_Tensor{new tensorflow::TensorInterface(src)};
}

Status TF_TensorToTensor(const TF_Tensor* src, Tensor* dst) {
  return tensorflow::down_cast<const tensorflow::TensorInterface*>(src->tensor)
      ->ToTensor(dst);
}

Status TensorInterface::ToTensor(tensorflow::Tensor* dst) const {
  *dst = tensor_;
  return OkStatus();
}

bool TensorInterface::IsAligned() const { return tensor_.IsAligned(); }

}  // namespace tensorflow

bool TF_TensorIsAligned(const TF_Tensor* t) { return t->tensor->IsAligned(); }