// Support for atomic operations -*- C++ -*-

// Copyright (C) 2004-2017 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/atomicity.h
 * This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _GLIBCXX_ATOMICITY_H
#define _GLIBCXX_ATOMICITY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/gthr.h>
#include <bits/atomic_word.h>

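// Note: _Atomic_word is a target-specific signed integer type chosen
// by <bits/atomic_word.h>; on most targets it is simply a typedef for
// int.
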
namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  // Functions for portable atomic access.
  // To abstract locking primitives across all thread policies, use:
  // __exchange_and_add_dispatch
  // __atomic_add_dispatch
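  //
  // Illustrative sketch (not part of this header): a minimal intrusive
  // reference count built on the dispatch functions.  The name
  // _RefCount is hypothetical.
  //
  //   struct _RefCount
  //   {
  //     _Atomic_word _M_count = 1;
  //
  //     void
  //     _M_acquire()
  //     { __atomic_add_dispatch(&_M_count, 1); }
  //
  //     // True when the last reference has just been dropped.
  //     bool
  //     _M_release()
  //     { return __exchange_and_add_dispatch(&_M_count, -1) == 1; }
  //   };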
  // Implement the primitives with the compiler's atomic builtins when
  // they are available.
#ifdef _GLIBCXX_ATOMIC_BUILTINS
  // Atomically add __val to *__mem and return the old value.
  static inline _Atomic_word
  __exchange_and_add(volatile _Atomic_word* __mem, int __val)
  { return __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }

  // Atomically add __val to *__mem, discarding the old value.
  static inline void
  __atomic_add(volatile _Atomic_word* __mem, int __val)
  { __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }
#else
  // Otherwise use out-of-line definitions provided by the library.
  _Atomic_word
  __attribute__ ((__unused__))
  __exchange_and_add(volatile _Atomic_word*, int) throw ();

  void
  __attribute__ ((__unused__))
  __atomic_add(volatile _Atomic_word*, int) throw ();
#endif

  // Non-atomic variants, safe only while a single thread exists.
  // __exchange_and_add_single returns the old value of *__mem.
  static inline _Atomic_word
  __exchange_and_add_single(_Atomic_word* __mem, int __val)
  {
    _Atomic_word __result = *__mem;
    *__mem += __val;
    return __result;
  }

  static inline void
  __atomic_add_single(_Atomic_word* __mem, int __val)
  { *__mem += __val; }

  // Dispatch at run time between the atomic and non-atomic variants:
  // __gthread_active_p() is true only when the thread library is
  // active, so single-threaded programs avoid the cost of an atomic
  // read-modify-write.
  static inline _Atomic_word
  __attribute__ ((__unused__))
  __exchange_and_add_dispatch(_Atomic_word* __mem, int __val)
  {
#ifdef __GTHREADS
    if (__gthread_active_p())
      return __exchange_and_add(__mem, __val);
    else
      return __exchange_and_add_single(__mem, __val);
#else
    return __exchange_and_add_single(__mem, __val);
#endif
  }

  static inline void
  __attribute__ ((__unused__))
  __atomic_add_dispatch(_Atomic_word* __mem, int __val)
  {
#ifdef __GTHREADS
    if (__gthread_active_p())
      __atomic_add(__mem, __val);
    else
      __atomic_add_single(__mem, __val);
#else
    __atomic_add_single(__mem, __val);
#endif
  }

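  // Behavioural sketch (illustrative only): whichever path is taken,
  // the dispatch functions act as an ordinary fetch-and-add.
  //
  //   _Atomic_word __count = 0;
  //   _Atomic_word __old = __exchange_and_add_dispatch(&__count, 5);
  //   // __old == 0, __count == 5.
  //   __atomic_add_dispatch(&__count, -2);
  //   // __count == 3.
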
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

// Even if the CPU doesn't need a memory barrier, we need to ensure
// that the compiler doesn't reorder memory accesses across the
// barriers.
#ifndef _GLIBCXX_READ_MEM_BARRIER
#define _GLIBCXX_READ_MEM_BARRIER __atomic_thread_fence (__ATOMIC_ACQUIRE)
#endif
#ifndef _GLIBCXX_WRITE_MEM_BARRIER
#define _GLIBCXX_WRITE_MEM_BARRIER __atomic_thread_fence (__ATOMIC_RELEASE)
#endif
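
// Illustrative sketch (not part of this header), assuming hypothetical
// variables __payload and __ready; a real program would also need
// __ready itself to be accessed atomically.
//
//   // Writer: publish the payload before raising the flag.
//   __payload = 42;
//   _GLIBCXX_WRITE_MEM_BARRIER;
//   __ready = 1;
//
//   // Reader: test the flag, then read the payload.
//   if (__ready)
//     {
//       _GLIBCXX_READ_MEM_BARRIER;
//       int __n = __payload;  // Sees 42 if __ready was observed set.
//     }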

#endif
