LCOV - code coverage report (generated by LCOV version 1.10)
Current view: top level - src/base - atomicops_internals_portable.h
Test: app.info
Date: 2019-04-17
Coverage: Lines: 30 of 30 hit (100.0 %); Functions: 0 of 0 (-)

// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomic semantics via the compiler's
// __atomic builtins. The code base is currently written assuming atomicity
// revolves around accesses instead of C++11's memory locations. The burden is
// on the programmer to ensure that all memory locations accessed atomically
// are never accessed non-atomically (tsan should help with this).
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success
//    one (except for release, which fails as relaxed): using a weaker
//    ordering is only valid under certain uses of compare exchange.
//  * Acquire store doesn't exist in the C11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence.
//  * Release load doesn't exist in the C11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load.
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C11 fetch add returns the previous value. The implementation
//    therefore needs to increment twice (which the compiler should be able
//    to detect and optimize).
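//
// For illustration only (editor's note, not part of the original header):
// if *ptr holds 5, __atomic_fetch_add(ptr, 2, ...) stores 7 but returns the
// previous value 5, so the increment wrappers below compute
// increment + fetch_add(...) = 2 + 5 = 7, the post-incremented value.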

#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "src/base/build_config.h"
#include "src/base/macros.h"

namespace v8 {
namespace base {

// This implementation is transitional and maintains the original API for
// atomicops.h.

inline void SeqCst_MemoryFence() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared
  // but not defined, leading to the linker complaining about undefined
  // references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}
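
// Illustrative sketch (hypothetical helpers, not part of this header) of the
// "acquire store" and "release load" constructions described in the notes
// above: neither ordering exists in the C11 memory model, so each is built
// from a relaxed access plus a sequentially consistent fence.
//
//   inline void AcquireStore_Sketch(volatile Atomic32* ptr, Atomic32 value) {
//     __atomic_store_n(ptr, value, __ATOMIC_RELAXED);  // relaxed store...
//     SeqCst_MemoryFence();                            // ...then fence
//   }
//
//   inline Atomic32 ReleaseLoad_Sketch(volatile const Atomic32* ptr) {
//     SeqCst_MemoryFence();                            // fence first...
//     return __atomic_load_n(ptr, __ATOMIC_RELAXED);   // ...then relaxed load
//   }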

inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
                                       Atomic16 old_value, Atomic16 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}
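
// Usage sketch (illustrative, not part of this header): CompareAndSwap
// returns the value observed at *ptr (on failure, __atomic_compare_exchange_n
// writes the observed value back into old_value), so a caller detects success
// by comparing the return value with the expected one:
//
//   Atomic32 expected = 0;  // hypothetical flag protocol: 0 = free, 1 = taken
//   if (Relaxed_CompareAndSwap(&flag, expected, 1) == expected) {
//     // This thread won the race and set flag to 1.
//   }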

inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                                            __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  USE(result);  // Silence GCC's unused-variable warning.
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}
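
// Usage sketch (illustrative, not part of this header): Release_Store paired
// with Acquire_Load yields the classic message-passing pattern. If the
// consumer's acquire load observes the released value, it also observes every
// write the producer made before the release store:
//
//   // Producer thread (payload and ready are hypothetical shared variables):
//   payload = 42;              // plain write
//   Release_Store(&ready, 1);  // publish
//
//   // Consumer thread:
//   if (Acquire_Load(&ready) == 1) {
//     // payload is guaranteed to read as 42 here.
//   }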

#if defined(V8_HOST_ARCH_64_BIT)

inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)
}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
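
Appendix (editor's illustration): a minimal sketch of how client code would
consume this API through the public atomicops.h header rather than this
internal file, as the top-of-file comment instructs. The include path is an
assumption based on this header's own includes; the counter variable is
hypothetical.

  #include "src/base/atomicops.h"  // public entry point (assumed path)

  void IncrementExample() {
    static v8::base::Atomic32 counter = 0;
    // Relaxed_AtomicIncrement returns the post-incremented value,
    // per the notes at the top of the header.
    v8::base::Atomic32 value = v8::base::Relaxed_AtomicIncrement(&counter, 1);
    (void)value;
  }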
