1#ifndef CAFFE2_CORE_COMMON_H_
2#define CAFFE2_CORE_COMMON_H_
3
4#include <algorithm>
5#include <cmath>
6#include <map>
7#include <memory>
8#include <numeric>
9#include <set>
10#include <sstream>
11#include <string>
12#include <type_traits>
13#include <vector>
14
15#ifdef __APPLE__
16#include <TargetConditionals.h>
17#endif
18
19#if defined(_MSC_VER)
20#include <io.h>
21#else
22#include <unistd.h>
23#endif
24
25// Macros used during the build of this caffe2 instance. This header file
26// is automatically generated by the cmake script during build.
27#include "caffe2/core/macros.h"
28
29#include <c10/macros/Macros.h>
30
31#include "c10/util/string_utils.h"
32
33namespace caffe2 {
34
35// Note(Yangqing): NVCC does not play well with unordered_map on some platforms,
36// forcing us to use std::map instead of unordered_map. This may affect speed
37// in some cases, but in most of the computation code we do not access map very
38// often, so it should be fine for us. I am putting a CaffeMap alias so we can
39// change it more easily if things work out for unordered_map down the road.
40template <typename Key, typename Value>
41using CaffeMap = std::map<Key, Value>;
42// using CaffeMap = std::unordered_map;
43
44// Using statements for common classes that we refer to in caffe2 very often.
45// Note that we only place it inside caffe2 so the global namespace is not
46// polluted.
47/* using override */
48using std::set;
49using std::string;
50using std::unique_ptr;
51using std::vector;
52
53// Just in order to mark things as not implemented. Do not use in final code.
54#define CAFFE_NOT_IMPLEMENTED CAFFE_THROW("Not Implemented.")
55
// Macros to mark a variable/parameter as intentionally unused (CAFFE2_UNUSED)
// and to mark a symbol as used so it is not discarded (CAFFE2_USED; no-op on
// MSVC, where only the unused-warning suppression is available).
57#if defined(_MSC_VER) && !defined(__clang__)
58#define CAFFE2_UNUSED __pragma(warning(suppress : 4100 4101))
59#define CAFFE2_USED
60#else
61#define CAFFE2_UNUSED __attribute__((__unused__))
62#define CAFFE2_USED __attribute__((__used__))
63#endif //_MSC_VER
64
65// Define alignment macro that is cross platform
66#if defined(_MSC_VER) && !defined(__clang__)
67#define CAFFE2_ALIGNED(x) __declspec(align(x))
68#else
69#define CAFFE2_ALIGNED(x) __attribute__((aligned(x)))
70#endif
71
72#if (defined _MSC_VER && !defined NOMINMAX)
73#define NOMINMAX
74#endif
75
76#if defined(__has_cpp_attribute)
77#if __has_cpp_attribute(nodiscard)
78#define CAFFE2_NODISCARD [[nodiscard]]
79#endif
80#endif
81#if !defined(CAFFE2_NODISCARD)
82#define CAFFE2_NODISCARD
83#endif
84
85using std::make_unique;
86
87#if defined(__ANDROID__) && !defined(__NDK_MAJOR__)
88using ::round;
89#else
90using std::round;
91#endif // defined(__ANDROID__) && !defined(__NDK_MAJOR__)
92
// dynamic cast reroute: if RTTI is disabled, fall back to static_cast.
// NOTE: unlike dynamic_cast, static_cast performs no runtime check, so when
// RTTI is off the caller must guarantee that `ptr` really points to a `Dst`;
// a wrong downcast is undefined behavior.
template <typename Dst, typename Src>
inline Dst dynamic_cast_if_rtti(Src ptr) {
#ifdef __GXX_RTTI
  // RTTI available: checked cast (yields nullptr for a failed pointer cast).
  return dynamic_cast<Dst>(ptr);
#else
  // RTTI disabled (-fno-rtti): unchecked downcast within the hierarchy.
  return static_cast<Dst>(ptr);
#endif
}
102
103// SkipIndices are used in operator_fallback_gpu.h and operator_fallback_mkl.h
104// as utility functions that marks input / output indices to skip when we use a
105// CPU operator as the fallback of GPU/MKL operator option.
106template <int... values>
107class SkipIndices {
108 private:
109 template <int V>
110 static inline bool ContainsInternal(const int i) {
111 return (i == V);
112 }
113 template <int First, int Second, int... Rest>
114 static inline bool ContainsInternal(const int i) {
115 return (i == First) || ContainsInternal<Second, Rest...>(i);
116 }
117
118 public:
119 static inline bool Contains(const int i) {
120 return ContainsInternal<values...>(i);
121 }
122};
123
124template <>
125class SkipIndices<> {
126 public:
127 static inline bool Contains(const int /*i*/) {
128 return false;
129 }
130};
131
// HasCudaRuntime() tells the program whether the binary has Cuda runtime
// linked. This function should not be used in static initialization functions
// as the underlying boolean variable is going to be switched on when one
// loads libtorch_gpu.so.
TORCH_API bool HasCudaRuntime();
// HIP (ROCm) analogue of HasCudaRuntime(); presumably flipped on when the
// HIP backend library is loaded — same static-initialization caveat applies.
TORCH_API bool HasHipRuntime();
namespace internal {
// Sets the Cuda Runtime flag that is used by HasCudaRuntime(). You should
// never use this function - it is only used by the Caffe2 gpu code to notify
// Caffe2 core that cuda runtime has been loaded.
TORCH_API void SetCudaRuntimeFlag();
// HIP counterpart of SetCudaRuntimeFlag(); only the HIP backend code should
// call this.
TORCH_API void SetHipRuntimeFlag();
} // namespace internal
// Returns which setting Caffe2 was configured and built with (exported from
// CMake)
TORCH_API const std::map<string, string>& GetBuildOptions();
148
149} // namespace caffe2
150
151#endif // CAFFE2_CORE_COMMON_H_
152