1 | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. |
2 | |
3 | Licensed under the Apache License, Version 2.0 (the "License"); |
4 | you may not use this file except in compliance with the License. |
5 | You may obtain a copy of the License at |
6 | |
7 | http://www.apache.org/licenses/LICENSE-2.0 |
8 | |
9 | Unless required by applicable law or agreed to in writing, software |
10 | distributed under the License is distributed on an "AS IS" BASIS, |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | See the License for the specific language governing permissions and |
13 | limitations under the License. |
14 | ==============================================================================*/ |
15 | |
16 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_LOCAL_DEVICE_H_ |
17 | #define TENSORFLOW_CORE_COMMON_RUNTIME_LOCAL_DEVICE_H_ |
18 | |
19 | #include "tensorflow/core/common_runtime/device.h" |
20 | #include "tensorflow/core/framework/device_attributes.pb.h" |
21 | #include "tensorflow/core/platform/macros.h" |
22 | |
23 | namespace tensorflow { |
24 | |
25 | namespace test { |
26 | class Benchmark; |
27 | } |
28 | struct SessionOptions; |
29 | |
30 | // This class is shared by ThreadPoolDevice and GPUDevice and |
31 | // initializes a shared Eigen compute device used by both. This |
32 | // should eventually be removed once we refactor ThreadPoolDevice and |
33 | // GPUDevice into more 'process-wide' abstractions. |
class LocalDevice : public Device {
 public:
  // Constructs a device with the given attributes and sets up its Eigen
  // compute device according to `options` (e.g. intra-op parallelism
  // settings — exact interpretation lives in the .cc; confirm there).
  LocalDevice(const SessionOptions& options,
              const DeviceAttributes& attributes);
  ~LocalDevice() override;

 private:
  // Whether devices share the process-global threadpool(s) in
  // global_tp_info_ instead of each owning one via owned_tp_info_.
  // NOTE(review): presumably must be set before any LocalDevice is
  // constructed to take effect — confirm in the .cc.
  static bool use_global_threadpool_;

  // Test-only toggle: private, reachable from outside only through the
  // `friend class test::Benchmark` declaration below.
  static void set_use_global_threadpool(bool use_global_threadpool) {
    use_global_threadpool_ = use_global_threadpool;
  }

  // Opaque bundle of Eigen threadpool state; defined in the .cc so this
  // header does not pull in Eigen.
  struct EigenThreadPoolInfo;
  // Threadpool owned by this device instance when the global threadpool
  // is not in use; null otherwise (exact invariant enforced in the .cc).
  std::unique_ptr<EigenThreadPoolInfo> owned_tp_info_;

  // All ThreadPoolDevices in the process associated with the same
  // NUMA node will share a single fixed sized threadpool for numerical
  // computations.
  static mutex global_tp_mu_;
  // Shared threadpools (one slot per NUMA node, per the comment above);
  // all access must hold global_tp_mu_.
  static gtl::InlinedVector<EigenThreadPoolInfo*, 4> global_tp_info_
      TF_GUARDED_BY(global_tp_mu_);

  // Lets benchmarks flip set_use_global_threadpool() despite it being
  // private.
  friend class test::Benchmark;

  TF_DISALLOW_COPY_AND_ASSIGN(LocalDevice);
};
61 | |
62 | } // namespace tensorflow |
63 | |
64 | #endif // TENSORFLOW_CORE_COMMON_RUNTIME_LOCAL_DEVICE_H_ |
65 | |