#include <ATen/MapAllocator.h>

#include <atomic>
#include <string>
#include <random>
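// The IPC refcount used by RefcountedMapAllocator (see below) lives in memory
// that is shared across processes, so it is only enabled when std::atomic<int>
// is always lock-free.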
#if ATOMIC_INT_LOCK_FREE == 2
#define AT_ATOMIC_IPC_REFCOUNT 1
#endif

#include <c10/core/CPUAllocator.h>
#include <c10/util/C++17.h>
#include <c10/util/Unicode.h>

/* stuff for mapped files */
#ifdef _WIN32
#include <c10/util/win32-headers.h>
#endif

#if defined(HAVE_MMAP)
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#endif

#if !defined(_MSC_VER) || defined(HAVE_MMAP)
#include <sys/types.h>
#include <unistd.h>
#elif defined(_MSC_VER)
#include <c10/util/win32-headers.h>
#endif

namespace at {

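// Extra bytes reserved at the front of every refcounted mapping. The MapInfo
// refcount header is stored there, and RefcountedMapAllocator::data() returns
// the address just past it.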
static constexpr int64_t map_alloc_alignment = 64;

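// Builds a process-wide-unique shared-memory handle of the form
// "/torch_<pid>_<random>_<counter>", so concurrent callers in the same or
// different processes do not collide.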
TORCH_API std::string NewProcessWideShmHandle()
{
  static std::atomic<uint64_t> counter{0};
  static std::random_device rd;
  std::string handle = "/torch_";
#ifdef _MSC_VER
  handle += c10::guts::to_string(GetCurrentProcessId());
#else
  handle += c10::guts::to_string(getpid());
#endif
  handle += "_";
  handle += c10::guts::to_string(rd());
  handle += "_";
  handle += c10::guts::to_string(counter.fetch_add(1, std::memory_order_relaxed));
  return handle;
}

#if defined(_WIN32) || defined(HAVE_MMAP)

namespace {
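// Header placed at the start of a refcounted mapping; it counts how many
// processes currently have the region attached.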
struct MapInfo {
  std::atomic<int> refcount;
};

const std::string unknown_filename = "filename not specified";
#ifdef _WIN32
const std::string unknown_eventname = "eventname not specified";
#endif
} // namespace (anonymous)

MapAllocator::MapAllocator(WithFd, std::string filename, int fd, int flags, size_t size)
  : filename_(filename.empty() ? unknown_filename : std::move(filename))
  , flags_(0) // to be filled later
  , size_(0) // to be filled later
#ifdef _WIN32
  , handle_(INVALID_HANDLE_VALUE) // to be filled later
  , event_(INVALID_HANDLE_VALUE) // to be filled later
  // Build the event name from filename_ rather than filename, which may
  // already have been moved from by the filename_ initializer above.
  , eventname_(filename_ == unknown_filename ? unknown_eventname : (filename_ + "_event"))
#else
  , fd_(fd)
#endif
  , base_ptr_(nullptr)
{

  if (!(flags & ALLOCATOR_MAPPED_SHARED) && !(flags & ALLOCATOR_MAPPED_SHAREDMEM)) {
    flags &= ~ALLOCATOR_MAPPED_NOCREATE;
  }
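  // ALLOCATOR_MAPPED_EXCLUSIVE with no other flag set means "exclusive" was
  // requested without any shared mode, which cannot work.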
  if ((flags ^ ALLOCATOR_MAPPED_EXCLUSIVE) == 0) {
    TORCH_CHECK(false, "ALLOCATOR_MAPPED_EXCLUSIVE flag requires opening the file in shared mode");
  }
#ifdef _WIN32
  if (fd != -1) {
    TORCH_CHECK(false, "MapAllocator_newWithFd is unsupported on Windows");
  }
#endif
  flags_ = flags;

  // OK, now do the allocation

  if (size == 0) {
    return;
  }

#ifdef _WIN32
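  // Shared-memory path on Windows: the mapping is backed by the system paging
  // file (CreateFileMappingW with INVALID_HANDLE_VALUE) and paired with a
  // named event that is signalled when the last user releases the mapping.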
  if (flags_ & ALLOCATOR_MAPPED_SHAREDMEM) {
    // Shadowing
    const wchar_t *filename;
    const wchar_t *eventname;
    const std::wstring wFilename = c10::u8u16(filename_);
    const std::wstring wEventname = c10::u8u16(eventname_);
    LARGE_INTEGER hfilesz;

    if (filename_[0] == '/') {
      filename = wFilename.c_str() + 1;
      eventname = wEventname.c_str() + 1;
    } else {
      filename = wFilename.c_str();
      eventname = wEventname.c_str();
    }

    hfilesz.QuadPart = size;

    if (flags_ & ALLOCATOR_MAPPED_EXCLUSIVE) {
      event_ = CreateEventW(nullptr, FALSE, FALSE, eventname);
    } else if (flags_ & ALLOCATOR_MAPPED_NOCREATE) {
      event_ = OpenEventW(EVENT_ALL_ACCESS, FALSE, eventname);
    } else {
      TORCH_CHECK(false, "Expected either ALLOCATOR_MAPPED_EXCLUSIVE or ALLOCATOR_MAPPED_NOCREATE");
    }

    if (event_ == nullptr) {
      // Report the narrow (UTF-8) name; streaming the wchar_t* would print a pointer.
      TORCH_CHECK(false, "Couldn't open shared event: <", eventname_, ">, error code: <", GetLastError(), ">");
    }

    if (flags_ & ALLOCATOR_MAPPED_EXCLUSIVE) {
      handle_ = CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, hfilesz.HighPart, hfilesz.LowPart, filename);
    } else if (flags_ & ALLOCATOR_MAPPED_NOCREATE) {
      handle_ = OpenFileMappingW(FILE_MAP_ALL_ACCESS, FALSE, filename);
    } else {
      TORCH_CHECK(false, "Expected either ALLOCATOR_MAPPED_EXCLUSIVE or ALLOCATOR_MAPPED_NOCREATE");
    }

    if (handle_ == nullptr) {
      TORCH_CHECK(false, "Couldn't open shared file mapping: <", filename_, ">, error code: <", GetLastError(), ">");
    }

    size_ = size;
    base_ptr_ = MapViewOfFile(handle_, FILE_MAP_ALL_ACCESS, 0, 0, size);
    if (!base_ptr_) {
      TORCH_CHECK(false, "Couldn't map view of shared file <", filename_, ">, error code: <", GetLastError(), ">");
    }
  } else {

    HANDLE hfile;
    HANDLE hmfile;
    LARGE_INTEGER hfilesz;

    if (flags_ & ALLOCATOR_MAPPED_EXCLUSIVE) {
      TORCH_CHECK(false, "exclusive file mapping is not supported on Windows");
    }
    if (flags_ & ALLOCATOR_MAPPED_NOCREATE) {
      TORCH_CHECK(false, "file mapping without creation is not supported on Windows");
    }
    if (flags_ & ALLOCATOR_MAPPED_KEEPFD) {
      TORCH_CHECK(false, "ALLOCATOR_MAPPED_KEEPFD not supported on Windows");
    }
    if (flags_ & ALLOCATOR_MAPPED_FROMFD) {
      TORCH_CHECK(false, "ALLOCATOR_MAPPED_FROMFD not supported on Windows");
    }

    // Shadowing
    const wchar_t *filename;
    const std::wstring wFilename = c10::u8u16(filename_);

    filename = wFilename.c_str();

    /* open file */
    /* FILE_FLAG_RANDOM_ACCESS ? */
    if (flags_) {
      hfile = CreateFileW(filename, GENERIC_READ|GENERIC_WRITE, FILE_SHARE_WRITE|FILE_SHARE_READ, 0, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0);
      if (hfile == INVALID_HANDLE_VALUE) {
        TORCH_CHECK(false, "could not open file <", filename_, "> in read-write mode; error code: <", GetLastError(), ">");
      }
    } else {
      hfile = CreateFileW(filename, GENERIC_READ, FILE_SHARE_WRITE|FILE_SHARE_READ, 0, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0);
      if (hfile == INVALID_HANDLE_VALUE) {
        TORCH_CHECK(false, "could not open file <", filename_, "> in read-only mode; error code: <", GetLastError(), ">");
      }
    }

    if (GetFileSizeEx(hfile, &hfilesz) == 0) {
      TORCH_CHECK(false, "could not get file size: <", filename_, ">; error code: <", GetLastError(), ">");
    }

    if (size > 0) {
      if (size > hfilesz.QuadPart) {
        if (flags_) {
          hfilesz.QuadPart = size;
          if (SetFilePointerEx(hfile, hfilesz, NULL, FILE_BEGIN) == 0) {
            CloseHandle(hfile);
            TORCH_CHECK(false, "unable to stretch file <", filename_, "> to the right size; error code: <", GetLastError(), ">");
          }
          if (SetEndOfFile(hfile) == 0) {
            CloseHandle(hfile);
            TORCH_CHECK(false, "unable to write to file <", filename_, ">; error code: <", GetLastError(), ">");
          }
        } else {
          CloseHandle(hfile);
          TORCH_CHECK(false, "file <", filename_, "> size is smaller than the required mapping size <", size, ">; error code: <", GetLastError(), ">");
        }
      }
    } else {
      size = hfilesz.QuadPart;
    }

    size_ = size; /* if we are here, it must be the right size */

    hfilesz.QuadPart = size_;

    /* get map handle */
    if (flags_) {
      if ( (hmfile = CreateFileMappingW(hfile, NULL, PAGE_READWRITE, hfilesz.HighPart, hfilesz.LowPart, NULL)) == NULL ) {
        TORCH_CHECK(false, "could not create a map on file <", filename_, ">; error code: <", GetLastError(), ">");
      }
    } else {
      if ( (hmfile = CreateFileMappingW(hfile, NULL, PAGE_WRITECOPY, hfilesz.HighPart, hfilesz.LowPart, NULL)) == NULL ) {
        TORCH_CHECK(false, "could not create a map on file <", filename_, ">; error code: <", GetLastError(), ">");
      }
    }

    /* map the stuff */
    if (flags_) {
      base_ptr_ = MapViewOfFile(hmfile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
    } else {
      base_ptr_ = MapViewOfFile(hmfile, FILE_MAP_COPY, 0, 0, 0);
    }

    CloseHandle(hfile);
    CloseHandle(hmfile);
  }
#else /* _WIN32 */
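  // POSIX path: open the file (or shared-memory object), grow it with
  // ftruncate if a larger mapping was requested, then mmap it either shared
  // or private depending on the flags.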
  {
    /* open file */
    int fd{-1};
    int flags{}; // shadow

    if (flags_ & (ALLOCATOR_MAPPED_SHARED | ALLOCATOR_MAPPED_SHAREDMEM)) {
      flags = O_RDWR | O_CREAT;
    } else {
      flags = O_RDONLY;
    }

    if (flags_ & ALLOCATOR_MAPPED_EXCLUSIVE) {
      flags |= O_EXCL;
    }
    if (flags_ & ALLOCATOR_MAPPED_NOCREATE) {
      flags &= ~O_CREAT;
    }

    if (!(flags_ & ALLOCATOR_MAPPED_FROMFD)) {
      if (flags_ & ALLOCATOR_MAPPED_SHARED) {
        if ((fd = open(filename_.c_str(), flags, (mode_t)0600)) == -1) {
          TORCH_CHECK(false, "unable to open file <", filename_, "> in read-write mode: ", strerror(errno), " (", errno, ")");
        }
      } else if (flags_ & ALLOCATOR_MAPPED_SHAREDMEM) {
#ifdef HAVE_SHM_OPEN
        if ((fd = shm_open(filename_.c_str(), flags, (mode_t)0600)) == -1) {
          TORCH_CHECK(false, "unable to open shared memory object <", filename_, "> in read-write mode: ", strerror(errno), " (", errno, ")");
        }
#else
        TORCH_CHECK(false, "unable to open file <", filename_, "> in sharedmem mode, shm_open unavailable on this platform");
#endif
      } else {
        if ((fd = open(filename_.c_str(), O_RDONLY)) == -1) {
          TORCH_CHECK(false, "unable to open file <", filename_, "> in read-only mode: ", strerror(errno), " (", errno, ")");
        }
      }
    } else {
      fd = fd_;
    }

    struct stat file_stat;
    if (fstat(fd, &file_stat) == -1) {
      int last_err = errno;
      if (!(flags_ & ALLOCATOR_MAPPED_FROMFD)) {
        ::close(fd);
      }
      TORCH_CHECK(false, "unable to stat the file <", filename_, ">: ", strerror(last_err), " (", last_err, ")");
    }

    if (size > 0) {
      if (static_cast<int64_t>(size) > file_stat.st_size) {
        if (flags_) {
          if (ftruncate(fd, size) == -1) {
            TORCH_CHECK(false, "unable to resize file <", filename_, "> to the right size: ", strerror(errno), " (", errno, ")");
          }
          if (fstat(fd, &file_stat) == -1 || file_stat.st_size < static_cast<int64_t>(size)) {
            int last_err = errno;
            ::close(fd);
            TORCH_CHECK(false, "unable to stretch file <", filename_, "> to the right size: ", strerror(last_err), " (", last_err, ")");
          }
/* on macOS write returns with errno 45 (Operation not supported) when used
 * with a file descriptor obtained via shm_open
 */
#ifndef __APPLE__
          if ((write(fd, "", 1)) != 1) /* note that the string "" contains the '\0' byte ... */ {
            int last_err = errno;
            ::close(fd);
            TORCH_CHECK(false, "unable to write to file <", filename_, ">: ", strerror(last_err), " (", last_err, ")");
          }
#endif
        } else {
          ::close(fd);
          TORCH_CHECK(false, "file <", filename_, "> size is smaller than the required mapping size <", size, ">");
        }
      }
    } else {
      size = file_stat.st_size;
    }

    size_ = size; /* if we are here, it must be the right size */

    /* map it */
    if (flags_ & (ALLOCATOR_MAPPED_SHARED | ALLOCATOR_MAPPED_SHAREDMEM)) {
      base_ptr_ = mmap(nullptr, size_, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    } else {
      base_ptr_ = mmap(nullptr, size_, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
    }

    if (base_ptr_ == MAP_FAILED) {
      base_ptr_ = nullptr; /* let's be sure it is NULL */
      TORCH_CHECK(false, "unable to mmap ", size_, " bytes from file <", filename_, ">: ", strerror(errno), " (", errno, ")");
    }

    if (flags_ & ALLOCATOR_MAPPED_KEEPFD) {
      fd_ = fd;
    } else {
      if (::close(fd) == -1) {
        TORCH_CHECK(false, "Error closing file <", filename_, ">: ", strerror(errno), " (", errno, ")");
      }
      fd_ = -1;
    }

    if (flags_ & ALLOCATOR_MAPPED_UNLINK) {
      if (flags_ & ALLOCATOR_MAPPED_SHAREDMEM) {
#ifdef HAVE_SHM_UNLINK
        if (shm_unlink(filename_.c_str()) == -1) {
          TORCH_CHECK(false, "could not unlink the shared memory file ", filename_, " : ", strerror(errno), " (", errno, ")");
        }
#else
        TORCH_CHECK(false, "could not unlink the shared memory file ", filename_, ", shm_unlink not available on platform");
#endif
      } else {
        if (unlink(filename_.c_str()) == -1) {
          TORCH_CHECK(false, "could not unlink file ", filename_, " : ", strerror(errno), " (", errno, ")");
        }
      }
    }
  }
#endif
  c10::reportMemoryUsageToProfiler(base_ptr_, size_, 0, size_, c10::Device(c10::DeviceType::CPU));
}

MapAllocator::MapAllocator(std::string filename, int flags, size_t size)
  : MapAllocator(WITH_FD, std::move(filename), -1, flags, size)
{}

#ifdef _WIN32
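// On Windows there is no shm_unlink equivalent for a paging-file-backed
// mapping. Instead, the last process to release a refcounted mapping signals
// the named event (see RefcountedMapAllocator::close()), and this callback
// then closes the event and file-mapping handles.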
struct ReleaseContext {
  HANDLE event;
  HANDLE handle;
  HANDLE wait;
};
static void CALLBACK WaitForReleaseHandle(PVOID lpParam, BOOLEAN TimerOrWaitFired)
{
  if (lpParam) {
    ReleaseContext *ctx = (ReleaseContext *)lpParam;

    SetEvent(ctx->event);
    CloseHandle(ctx->event);
    CloseHandle(ctx->handle);

    UnregisterWait(ctx->wait);

    delete ctx;
  }
}
#endif

void MapAllocator::close() {
  if (closed_) {
    return;
  }
  closed_ = true;
  if (base_ptr_ == nullptr) {
    return;
  }
#ifdef _WIN32
  if ((flags_ & ALLOCATOR_MAPPED_KEEPFD) || (flags_ & ALLOCATOR_MAPPED_SHAREDMEM))
    CloseHandle(handle_);
  if (UnmapViewOfFile(base_ptr_) == 0)
    TORCH_CHECK(false, "could not unmap the shared memory file");
#else /* _WIN32 */
  if (flags_ & ALLOCATOR_MAPPED_KEEPFD) {
    if (::close(fd_) == -1) {
      TORCH_CHECK(false, "could not close file descriptor ", fd_, " :", strerror(errno), " (", errno, ")" );
    }
  }

  if (munmap(base_ptr_, size_)) {
    TORCH_CHECK(false, "could not unmap the shared memory file: ", strerror(errno), " (", errno, ")");
  }

  if (!(flags_ & (ALLOCATOR_MAPPED_FROMFD | ALLOCATOR_MAPPED_UNLINK))) {
    if (flags_ & ALLOCATOR_MAPPED_SHAREDMEM) {
#ifdef HAVE_SHM_UNLINK
      if (shm_unlink(filename_.c_str()) == -1) {
        TORCH_CHECK(false, "could not unlink the shared memory file ", filename_, " : ", strerror(errno), " (", errno, ")");
      }
#else
      TORCH_CHECK(false, "could not unlink the shared memory file ", filename_, ", shm_unlink not available on platform");
#endif
    }
  }
#endif /* _WIN32 */
}

#else /* defined(_WIN32) || defined(HAVE_MMAP) */

MapAllocator::MapAllocator(std::string filename, int flags, size_t size) {
  TORCH_CHECK(false, "file mapping not supported on your system");
}

MapAllocator::MapAllocator(WithFd, std::string filename, int fd, int flags, size_t size) {
  TORCH_CHECK(false, "file mapping not supported on your system");
}

void MapAllocator::close() { }

#endif

#if (defined(_WIN32) || defined(HAVE_MMAP)) && defined(AT_ATOMIC_IPC_REFCOUNT)
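// The refcounted allocator keeps its reference count in the first
// map_alloc_alignment bytes of the shared mapping itself, so it is only
// available when file mapping exists and std::atomic<int> is lock-free
// (AT_ATOMIC_IPC_REFCOUNT).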

RefcountedMapAllocatorArgCheck::RefcountedMapAllocatorArgCheck(int flags) {
  if (flags & ALLOCATOR_MAPPED_FROMFD) {
    TORCH_CHECK(false, "RefcountedMapAllocator doesn't support ALLOCATOR_MAPPED_FROMFD flag");
  }
  if (flags & ALLOCATOR_MAPPED_KEEPFD) {
    TORCH_CHECK(false, "RefcountedMapAllocator doesn't support ALLOCATOR_MAPPED_KEEPFD flag");
  }
  if (flags & ALLOCATOR_MAPPED_UNLINK) {
    TORCH_CHECK(false, "RefcountedMapAllocator doesn't support ALLOCATOR_MAPPED_UNLINK flag");
  }
  if (!(flags & ALLOCATOR_MAPPED_SHAREDMEM)) {
    TORCH_CHECK(false, "RefcountedMapAllocator requires ALLOCATOR_MAPPED_SHAREDMEM flag");
  }
}

RefcountedMapAllocator::RefcountedMapAllocator(const char *filename, int flags, size_t size)
  : RefcountedMapAllocatorArgCheck(flags)
  , MapAllocator(filename, flags, size + map_alloc_alignment) {

  initializeAlloc();
}
RefcountedMapAllocator::RefcountedMapAllocator(WithFd, const char *filename, int fd, int flags, size_t size)
  : RefcountedMapAllocatorArgCheck(flags)
  , MapAllocator(WITH_FD, filename, fd, flags, size + map_alloc_alignment) {

  initializeAlloc();
}

void RefcountedMapAllocator::initializeAlloc() {
  TORCH_CHECK(base_ptr_, "base_ptr_ is null");
  MapInfo *map_info = (MapInfo*)base_ptr_;

#ifdef _WIN32
  ReleaseContext* r_ctx = new ReleaseContext;
  r_ctx->handle = handle_;
  r_ctx->event = event_;
  r_ctx->wait = NULL;
  BOOL can_wait = RegisterWaitForSingleObject(&r_ctx->wait, event_, WaitForReleaseHandle, (PVOID)r_ctx, INFINITE, WT_EXECUTEONLYONCE);
  TORCH_CHECK(can_wait, "Couldn't register wait on event, error code: <", GetLastError(), ">");
#endif

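  // The creator of the mapping (ALLOCATOR_MAPPED_EXCLUSIVE) constructs the
  // shared refcount in place; everyone else attaches to an existing mapping
  // and just increments it.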
  if (flags_ & ALLOCATOR_MAPPED_EXCLUSIVE) {
    new (&map_info->refcount) std::atomic<int>(1);
  } else {
    map_info->refcount++;
  }
}

void RefcountedMapAllocator::close() {
  if (closed_) {
    return;
  }
  closed_ = true;

  void* data = base_ptr_;

#ifdef _WIN32
  MapInfo *info = (MapInfo*)data;
  if (--info->refcount == 0) {
    SetEvent(event_);
  }
  if (UnmapViewOfFile(data) == 0) {
    TORCH_CHECK(false, "could not unmap the shared memory file");
  }
#else /* _WIN32 */

  MapInfo *info = (MapInfo*)(data);
  if (--info->refcount == 0) {
#ifdef HAVE_SHM_UNLINK
    if (shm_unlink(filename_.c_str()) == -1) {
      TORCH_CHECK(false, "could not unlink the shared memory file ", filename_);
    }
#else
    TORCH_CHECK(false, "could not unlink the shared memory file ", filename_, ", shm_unlink not available on platform");
#endif /* HAVE_SHM_UNLINK */
  }
  if (munmap(info, size_)) {
    TORCH_CHECK(false, "could not unmap the shared memory file ", filename_);
  }
#endif /* _WIN32 */
}

void RefcountedMapAllocator::incref()
{
  MapInfo *map_info = static_cast<MapInfo*>(base_ptr_);
  ++map_info->refcount;
}

int RefcountedMapAllocator::decref()
{
  MapInfo *map_info = static_cast<MapInfo*>(base_ptr_);
  return --map_info->refcount == 0;
}

#else


RefcountedMapAllocatorArgCheck::RefcountedMapAllocatorArgCheck(int flags) {}

RefcountedMapAllocator::RefcountedMapAllocator(const char *filename, int flags, size_t size)
  : RefcountedMapAllocatorArgCheck(flags),
    MapAllocator(filename, flags, size + map_alloc_alignment)
{
  TORCH_CHECK(false, "refcounted file mapping not supported on your system");
}

RefcountedMapAllocator::RefcountedMapAllocator(WithFd, const char *filename, int fd, int flags, size_t size)
  : RefcountedMapAllocatorArgCheck(flags),
    MapAllocator(WITH_FD, filename, fd, flags, size + map_alloc_alignment)
{
  TORCH_CHECK(false, "refcounted file mapping not supported on your system");
}

void RefcountedMapAllocator::initializeAlloc() {}

void RefcountedMapAllocator::close() {}

#endif

static void deleteMapAllocator(void* ptr) {
  delete static_cast<MapAllocator*>(ptr);
}

static void deleteRefcountedMapAllocator(void* ptr) {
  delete static_cast<RefcountedMapAllocator*>(ptr);
}

MapAllocator* MapAllocator::fromDataPtr(const at::DataPtr& dptr) {
  return dptr.cast_context<MapAllocator>(&deleteMapAllocator);
}

RefcountedMapAllocator* RefcountedMapAllocator::fromDataPtr(const at::DataPtr& dptr) {
  return dptr.cast_context<RefcountedMapAllocator>(&deleteRefcountedMapAllocator);
}

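// The makeDataPtr() helpers hand ownership of the freshly constructed
// allocator to the returned DataPtr: the allocator object is the context and
// the matching delete* function is the deleter, so the mapping is torn down
// when the DataPtr dies. *actual_size_out reports the usable size (for the
// refcounted variant, the header bytes are subtracted).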
at::DataPtr MapAllocator::makeDataPtr(std::string filename, int flags, size_t size, size_t* actual_size_out) {
  auto* context = new MapAllocator(std::move(filename), flags, size);
  if (actual_size_out) *actual_size_out = context->size();
  return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
}

at::DataPtr MapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
  auto* context = new MapAllocator(WITH_FD, filename, fd, flags, size);
  if (actual_size_out) *actual_size_out = context->size();
  return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
}

at::DataPtr RefcountedMapAllocator::makeDataPtr(const char *filename, int flags, size_t size, size_t* actual_size_out) {
  auto* context = new RefcountedMapAllocator(filename, flags, size);
  if (actual_size_out) *actual_size_out = context->size() - map_alloc_alignment;
  return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};
}

at::DataPtr RefcountedMapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
  auto* context = new RefcountedMapAllocator(WITH_FD, filename, fd, flags, size);
  if (actual_size_out) *actual_size_out = context->size() - map_alloc_alignment;
  return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};
}

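// User data starts right after the MapInfo refcount header.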
void* RefcountedMapAllocator::data() const {
  return static_cast<void*>(static_cast<char*>(base_ptr_) + map_alloc_alignment);
}

MapAllocator::~MapAllocator() {
  // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)
  close();
  c10::reportMemoryUsageToProfiler(base_ptr_, -size_, 0, 0, c10::Device(c10::DeviceType::CPU));
}

} // namespace at