Line 0
Link Here
|
|
|
1 |
--- deps/v8/src/atomicops_internals_generic_gcc.h.orig 2015-06-01 14:05:59 UTC |
2 |
+++ deps/v8/src/atomicops_internals_generic_gcc.h |
3 |
@@ -0,0 +1,135 @@ |
4 |
+// Copyright 2013 Red Hat Inc. All rights reserved. |
5 |
+// |
6 |
+// Redistribution and use in source and binary forms, with or without |
7 |
+// modification, are permitted provided that the following conditions are |
8 |
+// met: |
9 |
+// |
10 |
+// * Redistributions of source code must retain the above copyright |
11 |
+// notice, this list of conditions and the following disclaimer. |
12 |
+// * Redistributions in binary form must reproduce the above |
13 |
+// copyright notice, this list of conditions and the following disclaimer |
14 |
+// in the documentation and/or other materials provided with the |
15 |
+// distribution. |
16 |
+// * Neither the name of Red Hat Inc. nor the names of its |
17 |
+// contributors may be used to endorse or promote products derived from |
18 |
+// this software without specific prior written permission. |
19 |
+// |
20 |
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
21 |
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
22 |
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
23 |
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
24 |
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
25 |
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
26 |
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
27 |
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
28 |
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
29 |
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
30 |
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
31 |
+ |
32 |
+// This file is an internal atomic implementation, use atomicops.h instead. |
33 |
+ |
34 |
+#ifndef V8_ATOMICOPS_INTERNALS_GENERIC_GCC_H_ |
35 |
+#define V8_ATOMICOPS_INTERNALS_GENERIC_GCC_H_ |
36 |
+ |
37 |
+namespace v8 { |
38 |
+namespace internal { |
39 |
+ |
40 |
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
41 |
+ Atomic32 old_value, |
42 |
+ Atomic32 new_value) { |
43 |
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
44 |
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED); |
45 |
+ return old_value; |
46 |
+} |
47 |
+ |
48 |
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
49 |
+ Atomic32 new_value) { |
50 |
+ return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); |
51 |
+} |
52 |
+ |
53 |
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
54 |
+ Atomic32 increment) { |
55 |
+ return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED); |
56 |
+} |
57 |
+ |
58 |
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
59 |
+ Atomic32 increment) { |
60 |
+ return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST); |
61 |
+} |
62 |
+ |
63 |
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
64 |
+ Atomic32 old_value, |
65 |
+ Atomic32 new_value) { |
66 |
+ __atomic_compare_exchange(ptr, &old_value, &new_value, false, |
67 |
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); |
68 |
+ return old_value; |
69 |
+} |
70 |
+ |
71 |
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
72 |
+ Atomic32 old_value, |
73 |
+ Atomic32 new_value) { |
74 |
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
75 |
+ __ATOMIC_RELEASE, __ATOMIC_ACQUIRE); |
76 |
+ return old_value; |
77 |
+} |
78 |
+ |
79 |
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
80 |
+ __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
81 |
+} |
82 |
+ |
83 |
+inline void MemoryBarrier() { |
84 |
+ __sync_synchronize(); |
85 |
+} |
86 |
+ |
87 |
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
88 |
+ __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST); |
89 |
+} |
90 |
+ |
91 |
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
92 |
+ __atomic_store_n(ptr, value, __ATOMIC_RELEASE); |
93 |
+} |
94 |
+ |
95 |
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
96 |
+ return __atomic_load_n(ptr, __ATOMIC_RELAXED); |
97 |
+} |
98 |
+ |
99 |
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
100 |
+ return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); |
101 |
+} |
102 |
+ |
103 |
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
104 |
+ return __atomic_load_n(ptr, __ATOMIC_SEQ_CST); |
105 |
+} |
106 |
+ |
107 |
+#ifdef __LP64__ |
108 |
+ |
109 |
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
110 |
+ __atomic_store_n(ptr, value, __ATOMIC_RELEASE); |
111 |
+} |
112 |
+ |
113 |
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
114 |
+ return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); |
115 |
+} |
116 |
+ |
117 |
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
118 |
+ Atomic64 old_value, |
119 |
+ Atomic64 new_value) { |
120 |
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
121 |
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); |
122 |
+ return old_value; |
123 |
+} |
124 |
+ |
125 |
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
126 |
+ Atomic64 old_value, |
127 |
+ Atomic64 new_value) { |
128 |
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
129 |
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED); |
130 |
+ return old_value; |
131 |
+} |
132 |
+ |
133 |
+#endif // defined(__LP64__) |
134 |
+ |
135 |
+} // namespace internal |
136 |
+} // namespace v8 |
137 |
+ |
138 |
+#endif // V8_ATOMICOPS_INTERNALS_GENERIC_GCC_H_ |