LCOV - code coverage report
Current view: top level - src/base - atomicops_internals_portable.h (source / functions) Hit Total Coverage
Test: app.info Lines: 37 37 100.0 %
Date: 2017-10-20 Functions: 7 7 100.0 %

          Line data    Source code
       1             : // Copyright 2016 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : // This file is an internal atomic implementation, use atomicops.h instead.
       6             : //
       7             : // This implementation uses C++11 atomics' member functions. The code base is
       8             : // currently written assuming atomicity revolves around accesses instead of
       9             : // C++11's memory locations. The burden is on the programmer to ensure that all
      10             : // memory locations accessed atomically are never accessed non-atomically (tsan
      11             : // should help with this).
      12             : //
      13             : // Of note in this implementation:
      14             : //  * All NoBarrier variants are implemented as relaxed.
      15             : //  * All Barrier variants are implemented as sequentially-consistent.
      16             : //  * Compare exchange's failure ordering is always the same as the success one
      17             : //    (except for release, which fails as relaxed): using a weaker ordering is
      18             : //    only valid under certain uses of compare exchange.
      19             : //  * Acquire store doesn't exist in the C11 memory model, it is instead
      20             : //    implemented as a relaxed store followed by a sequentially consistent
      21             : //    fence.
      22             : //  * Release load doesn't exist in the C11 memory model, it is instead
      23             : //    implemented as sequentially consistent fence followed by a relaxed load.
      24             : //  * Atomic increment is expected to return the post-incremented value, whereas
      25             : //    C11 fetch add returns the previous value. The implementation therefore
      26             : //    needs to increment twice (which the compiler should be able to detect and
      27             : //    optimize).
      28             : 
      29             : #ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
      30             : #define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
      31             : 
      32             : #include <atomic>
      33             : 
      34             : #include "src/base/build_config.h"
      35             : #include "src/base/macros.h"
      36             : 
      37             : namespace v8 {
      38             : namespace base {
      39             : 
      40             : // This implementation is transitional and maintains the original API for
      41             : // atomicops.h.
      42             : 
// Emits a full sequentially-consistent memory fence.
inline void SeqCst_MemoryFence() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
  // not defined, leading to the linker complaining about undefined references.
  // The GCC builtin has identical semantics to the std:: function.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}
      52             : 
      53             : inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
      54             :                                        Atomic32 old_value, Atomic32 new_value) {
      55             :   __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
      56          18 :                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
      57          18 :   return old_value;
      58             : }
      59             : 
      60             : inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
      61             :                                        Atomic32 new_value) {
      62         299 :   return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
      63             : }
      64             : 
      65             : inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
      66             :                                         Atomic32 increment) {
      67      120836 :   return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
      68             : }
      69             : 
      70             : inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
      71             :                                         Atomic32 increment) {
      72             :   return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
      73             : }
      74             : 
      75             : inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
      76             :                                        Atomic32 old_value, Atomic32 new_value) {
      77             :   __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
      78             :                               __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
      79             :   return old_value;
      80             : }
      81             : 
      82             : inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
      83             :                                       Atomic8 new_value) {
      84             :   bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
      85     1158398 :                                             __ATOMIC_RELEASE, __ATOMIC_RELAXED);
      86             :   USE(result);  // Make gcc compiler happy.
      87         336 :   return old_value;
      88             : }
      89             : 
      90             : inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
      91             :                                        Atomic32 old_value, Atomic32 new_value) {
      92             :   __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
      93  1350231101 :                               __ATOMIC_RELEASE, __ATOMIC_RELAXED);
      94  1350231101 :   return old_value;
      95             : }
      96             : 
// Relaxed (no ordering constraints) atomic store of an 8-bit value.
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
     100             : 
// Relaxed (no ordering constraints) atomic store of a 32-bit value.
inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
     104             : 
// Release-ordered atomic store of a 32-bit value.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
     108             : 
// Relaxed (no ordering constraints) atomic load of an 8-bit value.
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
     112             : 
// Relaxed (no ordering constraints) atomic load of a 32-bit value.
inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
     116             : 
// Acquire-ordered atomic load of a 32-bit value.
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}
     120             : 
     121             : #if defined(V8_HOST_ARCH_64_BIT)
     122             : 
     123             : inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
     124             :                                        Atomic64 old_value, Atomic64 new_value) {
     125             :   __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
     126     2281762 :                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
     127     2281762 :   return old_value;
     128             : }
     129             : 
     130             : inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
     131             :                                        Atomic64 new_value) {
     132          18 :   return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
     133             : }
     134             : 
     135             : inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
     136             :                                         Atomic64 increment) {
     137    86039490 :   return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
     138             : }
     139             : 
     140   231938361 : inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
     141             :                                         Atomic64 increment) {
     142   231938361 :   return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
     143             : }
     144             : 
     145             : inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
     146             :                                        Atomic64 old_value, Atomic64 new_value) {
     147             :   __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
     148      609928 :                               __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
     149      609928 :   return old_value;
     150             : }
     151             : 
     152     4782461 : inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
     153             :                                        Atomic64 old_value, Atomic64 new_value) {
     154             :   __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
     155   111980985 :                               __ATOMIC_RELEASE, __ATOMIC_RELAXED);
     156   111980985 :   return old_value;
     157             : }
     158             : 
// Relaxed (no ordering constraints) atomic store of a 64-bit value.
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
     162             : 
// Release-ordered atomic store of a 64-bit value.
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
     166             : 
// Relaxed (no ordering constraints) atomic load of a 64-bit value.
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
     170             : 
// Acquire-ordered atomic load of a 64-bit value.
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}
     174             : 
     175             : #endif  // defined(V8_HOST_ARCH_64_BIT)
     176             : }  // namespace base
     177             : }  // namespace v8
     178             : 
#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

Generated by: LCOV version 1.10