LCOV - code coverage report
Current view: top level - src/base - atomicops_internals_portable.h (source / functions)
Test: app.info
Date: 2017-04-26

                 Hit    Total    Coverage
Lines:            31       31     100.0 %
Functions:         8        8     100.0 %

// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics, accessed through the compiler's
// __atomic builtins. The code base is currently written assuming atomicity
// revolves around accesses rather than C++11's memory locations. The burden
// is on the programmer to ensure that all memory locations accessed
// atomically are never accessed non-atomically (TSan should help with this).
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially consistent.
//  * Compare exchange's failure ordering is always the same as the success
//    one (except for release, which fails as relaxed): using a weaker
//    ordering is only valid under certain uses of compare exchange.
//  * Acquire store doesn't exist in the C11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence.
//  * Release load doesn't exist in the C11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load.
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C11 fetch add returns the previous value. The implementation
//    therefore needs to add the increment a second time (which the compiler
//    should be able to detect and optimize).
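//
// As a concrete sketch of the two emulations above, in terms of the __atomic
// builtins used below (an illustration, not code from the original header):
//
//   // Acquire store: relaxed store, then a sequentially consistent fence.
//   __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
//   __atomic_thread_fence(__ATOMIC_SEQ_CST);
//
//   // Release load: sequentially consistent fence, then a relaxed load.
//   __atomic_thread_fence(__ATOMIC_SEQ_CST);
//   Atomic32 value = __atomic_load_n(ptr, __ATOMIC_RELAXED);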

#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "src/base/build_config.h"

namespace v8 {
namespace base {

// This implementation is transitional and maintains the original API for
// atomicops.h.

inline void MemoryBarrier() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared
  // but not defined, leading to the linker complaining about undefined
  // references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}
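
// A usage sketch for MemoryBarrier() (an illustration, not part of the
// original header): in the classic store-buffering test, a full fence
// between each thread's store and its subsequent load rules out both threads
// reading the other's initial value. Assuming Atomic32 x = 0, y = 0:
//
//   // Thread 1:                         // Thread 2:
//   NoBarrier_Store(&x, 1);              NoBarrier_Store(&y, 1);
//   MemoryBarrier();                     MemoryBarrier();
//   Atomic32 r1 = NoBarrier_Load(&y);    Atomic32 r2 = NoBarrier_Load(&x);
//
// With both fences in place, the outcome r1 == 0 && r2 == 0 is impossible;
// with the relaxed accesses alone, it is allowed.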

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}
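
// A usage sketch (not part of the original header): the function returns the
// value observed at |ptr|, so the caller detects success by comparing that
// return value against the expected |old_value|. A typical retry loop:
//
//   Atomic32 observed = NoBarrier_Load(&counter);
//   for (;;) {
//     Atomic32 prev =
//         NoBarrier_CompareAndSwap(&counter, observed, observed + 1);
//     if (prev == observed) break;  // The swap succeeded.
//     observed = prev;              // It failed; retry with the fresh value.
//   }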

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}
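
// Note on the pattern above: __atomic_fetch_add returns the value held at
// |*ptr| *before* the addition, while this API promises the post-incremented
// value, so |increment| is added once more to the fetched result. For
// example, with *ptr == 5 and increment == 2, fetch_add stores 7 but returns
// 5, and the function returns 5 + 2 == 7.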

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}
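
// Note on the failure orderings above: a failed compare exchange performs
// only a load of |*ptr|, and release semantics are meaningless on a load, so
// the release variant must fail as relaxed. C++11 further requires the
// failure ordering to be no stronger than the success ordering.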

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}
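
// A usage sketch for the Release_Store/Acquire_Load pair (an illustration,
// not part of the original header): together they implement the classic
// message-passing pattern, where a consumer that observes the flag also
// observes every write made before the releasing store.
//
//   // Producer thread:
//   NoBarrier_Store(&payload, 42);
//   Release_Store(&flag, 1);
//
//   // Consumer thread:
//   if (Acquire_Load(&flag) == 1) {
//     Atomic32 v = NoBarrier_Load(&payload);  // Guaranteed to read 42.
//   }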

#if defined(V8_HOST_ARCH_64_BIT)

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)
}  // namespace base
}  // namespace v8

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

Generated by: LCOV version 1.10