1#ifndef C10_MACROS_EXPORT_H_
2#define C10_MACROS_EXPORT_H_
3
4/* Header file to define the common scaffolding for exported symbols.
5 *
6 * Export is by itself a quite tricky situation to deal with, and if you are
7 * hitting this file, make sure you start with the background here:
8 * - Linux: https://gcc.gnu.org/wiki/Visibility
9 * - Windows:
10 * https://docs.microsoft.com/en-us/cpp/cpp/dllexport-dllimport?view=vs-2017
11 *
12 * Do NOT include this file directly. Instead, use c10/macros/Macros.h
13 */
14
// You do not need to edit this part of the file unless you are changing the
// core pytorch export abstractions.
17//
18// This part defines the C10 core export and import macros. This is controlled
19// by whether we are building shared libraries or not, which is determined
20// during build time and codified in c10/core/cmake_macros.h.
21// When the library is built as a shared lib, EXPORT and IMPORT will contain
22// visibility attributes. If it is being built as a static lib, then EXPORT
23// and IMPORT basically have no effect.
24
25// As a rule of thumb, you should almost NEVER mix static and shared builds for
26// libraries that depend on c10. AKA, if c10 is built as a static library, we
27// recommend everything dependent on c10 to be built statically. If c10 is built
28// as a shared library, everything dependent on it should be built as shared. In
29// the PyTorch project, all native libraries shall use the macro
30// C10_BUILD_SHARED_LIB to check whether pytorch is building shared or static
31// libraries.
32
33// For build systems that do not directly depend on CMake and directly build
34// from the source directory (such as Buck), one may not have a cmake_macros.h
35// file at all. In this case, the build system is responsible for providing
36// correct macro definitions corresponding to the cmake_macros.h.in file.
37//
38// In such scenarios, one should define the macro
39// C10_USING_CUSTOM_GENERATED_MACROS
40// to inform this header that it does not need to include the cmake_macros.h
41// file.
42
43#ifndef C10_USING_CUSTOM_GENERATED_MACROS
44#include <c10/macros/cmake_macros.h>
45#endif // C10_USING_CUSTOM_GENERATED_MACROS
46
/*
 * Core export/import/hidden attribute definitions.
 *
 * Windows: shared builds map C10_EXPORT/C10_IMPORT onto
 * __declspec(dllexport)/__declspec(dllimport); static builds leave both
 * empty. Windows has no hidden-visibility attribute, so C10_HIDDEN is
 * always empty there.
 *
 * Everywhere else: GCC-compatible compilers use ELF visibility
 * attributes, and importing a symbol is the same as exporting it
 * (C10_IMPORT aliases C10_EXPORT). Non-GNU compilers get empty
 * definitions.
 */
#if defined(_WIN32)
#define C10_HIDDEN
#if defined(C10_BUILD_SHARED_LIBS)
#define C10_EXPORT __declspec(dllexport)
#define C10_IMPORT __declspec(dllimport)
#else
#define C10_EXPORT
#define C10_IMPORT
#endif
#else // defined(_WIN32)
#if defined(__GNUC__)
#define C10_EXPORT __attribute__((__visibility__("default")))
#define C10_HIDDEN __attribute__((__visibility__("hidden")))
#else // defined(__GNUC__)
#define C10_EXPORT
#define C10_HIDDEN
#endif // defined(__GNUC__)
#define C10_IMPORT C10_EXPORT
#endif // defined(_WIN32)
66
// Escape hatch: building with NO_EXPORT defined strips the visibility
// attribute by redefining C10_EXPORT to nothing. This must appear after the
// platform-specific definitions above so the redefinition wins. On
// non-Windows builds C10_IMPORT expands through C10_EXPORT at its point of
// use, so it is emptied as well.
#ifdef NO_EXPORT
#undef C10_EXPORT
#define C10_EXPORT
#endif
71
72// Definition of an adaptive XX_API macro, that depends on whether you are
73// building the library itself or not, routes to XX_EXPORT and XX_IMPORT.
74// Basically, you will need to do this for each shared library that you are
75// building, and the instruction is as follows: assuming that you are building
76// a library called libawesome.so. You should:
// (1) for your cmake target (usually done by "add_library(awesome ...)"),
78// define a macro called AWESOME_BUILD_MAIN_LIB using
79// target_compile_options.
80// (2) define the AWESOME_API macro similar to the one below.
81// And in the source file of your awesome library, use AWESOME_API to
82// annotate public symbols.
83
84// Here, for the C10 library, we will define the macro C10_API for both import
85// and export.
86
// C10_API is used by libc10.so itself: translation units compiled into the
// main c10 library define C10_BUILD_MAIN_LIB and therefore export their
// public symbols; every other consumer imports them.
#if defined(C10_BUILD_MAIN_LIB)
#define C10_API C10_EXPORT
#else
#define C10_API C10_IMPORT
#endif
93
// TORCH_API is used by libtorch.so. Note the controlling flag is named
// CAFFE2_BUILD_MAIN_LIB (presumably a Caffe2-era holdover — confirm before
// renaming anything).
#if defined(CAFFE2_BUILD_MAIN_LIB)
#define TORCH_API C10_EXPORT
#else
#define TORCH_API C10_IMPORT
#endif
100
101// You may be wondering: Whose brilliant idea was it to split torch_cuda into
102// two pieces with confusing names?
103// Once upon a time, there _was_ only TORCH_CUDA_API. All was happy until we
104// tried to compile PyTorch for CUDA 11.1, which ran into relocation marker
105// issues when linking big binaries.
106// (https://github.com/pytorch/pytorch/issues/39968) We had two choices:
107// (1) Stop supporting so many GPU architectures
108// (2) Do something else
109// We chose #2 and decided to split the behemoth that was torch_cuda into two
110// smaller libraries, one with most of the core kernel functions (torch_cuda_cu)
// and the other that had... well... everything else (torch_cuda_cpp). The
// idea was
112// this: instead of linking our static libraries (like the hefty
113// libcudnn_static.a) with another huge library, torch_cuda, and run into pesky
114// relocation marker issues, we could link our static libraries to a smaller
115// part of torch_cuda (torch_cuda_cpp) and avoid the issues.
116
// libtorch_cuda_cu.so: the core-kernel half of the split torch_cuda build.
// When neither flag is set (i.e. a non-split build), the macro is
// deliberately left undefined here; the combined torch_cuda section further
// down supplies it.
#if defined(TORCH_CUDA_CU_BUILD_MAIN_LIB)
#define TORCH_CUDA_CU_API C10_EXPORT
#elif defined(BUILD_SPLIT_CUDA)
#define TORCH_CUDA_CU_API C10_IMPORT
#endif
123
// libtorch_cuda_cpp.so: the "everything else" half of the split torch_cuda
// build. As with the cu half, a non-split build gets its definition from
// the combined torch_cuda section below, so nothing is defined here in
// that case.
#if defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_CUDA_CPP_API C10_EXPORT
#elif defined(BUILD_SPLIT_CUDA)
#define TORCH_CUDA_CPP_API C10_IMPORT
#endif
130
// libtorch_cuda.so: the monolithic build, where torch_cuda_cu and
// torch_cuda_cpp are part of the same library and share one API surface.
#if defined(TORCH_CUDA_BUILD_MAIN_LIB)
#define TORCH_CUDA_CU_API C10_EXPORT
#define TORCH_CUDA_CPP_API C10_EXPORT
#elif !defined(BUILD_SPLIT_CUDA)
#define TORCH_CUDA_CU_API C10_IMPORT
#define TORCH_CUDA_CPP_API C10_IMPORT
#endif
140
// libtorch_hip.so (the HIP/ROCm counterpart of the CUDA libraries above)
#ifdef TORCH_HIP_BUILD_MAIN_LIB
#define TORCH_HIP_API C10_EXPORT
#else
#define TORCH_HIP_API C10_IMPORT
#endif
146
// Enums only need to be exported on windows for non-CUDA files
// NOTE(review): the comment above says "non-CUDA files", but the condition
// below requires __CUDACC__, i.e. it fires only when compiling CUDA sources
// on Windows. One of the two is wrong — confirm the intent before touching
// the condition, since changing it affects every enum annotated with
// C10_API_ENUM.
#if defined(_WIN32) && defined(__CUDACC__)
#define C10_API_ENUM C10_API
#else
#define C10_API_ENUM
#endif
153
#endif // C10_MACROS_EXPORT_H_
155