Coverage Report

Created: 2025-07-11 06:37

/src/abseil-cpp/absl/debugging/internal/vdso_support.cc
Every executable line in this file reports an execution count of 0; the file is entirely uncovered in this run. The source is reproduced below.
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Allow dynamic symbol lookup in the kernel VDSO page.
//
// VDSOSupport -- a class representing kernel VDSO (if present).

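For orientation, here is a minimal, hypothetical usage sketch of the class (not part of this file). It resolves the same "__vdso_getcpu" symbol, version, and type triple that Init() looks up further down, then calls it through a plain function pointer. It assumes a Linux/glibc target where ABSL_HAVE_VDSO_SUPPORT is defined, and relies on the SymbolInfo alias declared in vdso_support.h; <elf.h> supplies STT_FUNC. The function name CurrentCpuViaVdso is illustrative only.

// Hypothetical usage sketch -- not part of vdso_support.cc.
#include <elf.h>  // STT_FUNC

#include "absl/debugging/internal/vdso_support.h"

#ifdef ABSL_HAVE_VDSO_SUPPORT
// Returns the current CPU number via the vDSO, or -1 if the symbol is absent.
long CurrentCpuViaVdso() {  // NOLINT(runtime/int)
  absl::debugging_internal::VDSOSupport vdso;
  absl::debugging_internal::VDSOSupport::SymbolInfo info;
  if (!vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
    return -1;  // No vDSO mapped, or this kernel does not export the symbol.
  }
  using GetCpuFn = long (*)(unsigned *, void *, void *);  // NOLINT(runtime/int)
  auto getcpu = reinterpret_cast<GetCpuFn>(const_cast<void *>(info.address));
  unsigned cpu = 0;
  return getcpu(&cpu, nullptr, nullptr) == 0 ? static_cast<long>(cpu) : -1;
}
#endif  // ABSL_HAVE_VDSO_SUPPORT
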
#include "absl/debugging/internal/vdso_support.h"
#include "absl/base/attributes.h"

#ifdef ABSL_HAVE_VDSO_SUPPORT     // defined in vdso_support.h

#if !defined(__has_include)
#define __has_include(header) 0
#endif

#include <errno.h>
#include <fcntl.h>
#if __has_include(<syscall.h>)
#include <syscall.h>
#elif __has_include(<sys/syscall.h>)
#include <sys/syscall.h>
#endif
#include <unistd.h>

#if !defined(__UCLIBC__) && defined(__GLIBC__) && \
    (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
#define ABSL_HAVE_GETAUXVAL
#endif

#ifdef ABSL_HAVE_GETAUXVAL
#include <sys/auxv.h>
#endif

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/port.h"

#ifndef AT_SYSINFO_EHDR
#define AT_SYSINFO_EHDR 33  // for crosstoolv10
#endif

#if defined(__NetBSD__)
using Elf32_auxv_t = Aux32Info;
using Elf64_auxv_t = Aux64Info;
#endif
#if defined(__FreeBSD__)
#if defined(__ELF_WORD_SIZE) && __ELF_WORD_SIZE == 64
using Elf64_auxv_t = Elf64_Auxinfo;
#endif
using Elf32_auxv_t = Elf32_Auxinfo;
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {

ABSL_CONST_INIT
std::atomic<const void *> VDSOSupport::vdso_base_(
    debugging_internal::ElfMemImage::kInvalidBase);

ABSL_CONST_INIT std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(
    &InitAndGetCPU);

VDSOSupport::VDSOSupport()
    // If vdso_base_ is still set to kInvalidBase, we got here
    // before VDSOSupport::Init has been called. Call it now.
    : image_(vdso_base_.load(std::memory_order_relaxed) ==
                     debugging_internal::ElfMemImage::kInvalidBase
                 ? Init()
                 : vdso_base_.load(std::memory_order_relaxed)) {}

// NOTE: we can't use GoogleOnceInit() below, because we can be
// called by tcmalloc, and none of the *once* stuff may be functional yet.
//
// In addition, we hope that the VDSOSupportHelper constructor
// causes this code to run before there are any threads, and before
// InitGoogle() has executed any chroot or setuid calls.
//
// Finally, even if there is a race here, it is harmless, because
// the operation should be idempotent.
const void *VDSOSupport::Init() {
  const auto kInvalidBase = debugging_internal::ElfMemImage::kInvalidBase;
#ifdef ABSL_HAVE_GETAUXVAL
  if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
    errno = 0;
    const void *const sysinfo_ehdr =
        reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
    if (errno == 0) {
      vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
    }
  }
#endif  // ABSL_HAVE_GETAUXVAL
  if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
    int fd = open("/proc/self/auxv", O_RDONLY);
    if (fd == -1) {
      // Kernel too old to have a VDSO.
      vdso_base_.store(nullptr, std::memory_order_relaxed);
      getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
      return nullptr;
    }
    ElfW(auxv_t) aux;
    while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
      if (aux.a_type == AT_SYSINFO_EHDR) {
#if defined(__NetBSD__)
        vdso_base_.store(reinterpret_cast<void *>(aux.a_v),
                         std::memory_order_relaxed);
#else
        vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
                         std::memory_order_relaxed);
#endif
        break;
      }
    }
    close(fd);
    if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
      // Didn't find AT_SYSINFO_EHDR in auxv[].
      vdso_base_.store(nullptr, std::memory_order_relaxed);
    }
  }
  GetCpuFn fn = &GetCPUViaSyscall;  // default if VDSO not present.
  if (vdso_base_.load(std::memory_order_relaxed)) {
    VDSOSupport vdso;
    SymbolInfo info;
    if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
      fn = reinterpret_cast<GetCpuFn>(const_cast<void *>(info.address));
    }
  }
  // Subtle: this code runs outside of any locks; prevent compiler
  // from assigning to getcpu_fn_ more than once.
  getcpu_fn_.store(fn, std::memory_order_relaxed);
  return vdso_base_.load(std::memory_order_relaxed);
}

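The discovery logic above can also be illustrated standalone. The sketch below (hypothetical and simplified, Linux/glibc only, function name FindVdsoBase invented for illustration) mirrors the two steps Init() takes: ask the C library via getauxval(AT_SYSINFO_EHDR), and if that yields nothing, scan /proc/self/auxv for the same auxiliary-vector entry.

// Hypothetical standalone sketch of the discovery performed by Init().
#include <fcntl.h>
#include <link.h>      // ElfW, AT_SYSINFO_EHDR (via <elf.h>)
#include <sys/auxv.h>  // getauxval (glibc >= 2.16)
#include <unistd.h>

// Returns the vDSO ELF header address, or nullptr if none was found.
const void *FindVdsoBase() {
  // Step 1: the auxiliary vector as exposed by the C library.
  if (unsigned long v = getauxval(AT_SYSINFO_EHDR)) {  // NOLINT(runtime/int)
    return reinterpret_cast<const void *>(v);
  }
  // Step 2: read the auxiliary vector directly from the kernel.
  int fd = open("/proc/self/auxv", O_RDONLY);
  if (fd == -1) return nullptr;
  const void *base = nullptr;
  ElfW(auxv_t) aux;
  while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
    if (aux.a_type == AT_SYSINFO_EHDR) {
      base = reinterpret_cast<const void *>(aux.a_un.a_val);
      break;
    }
  }
  close(fd);
  return base;
}
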
const void *VDSOSupport::SetBase(const void *base) {
  ABSL_RAW_CHECK(base != debugging_internal::ElfMemImage::kInvalidBase,
                 "internal error");
  const void *old_base = vdso_base_.load(std::memory_order_relaxed);
  vdso_base_.store(base, std::memory_order_relaxed);
  image_.Init(base);
  // Also reset getcpu_fn_, so GetCPU could be tested with simulated VDSO.
  getcpu_fn_.store(&InitAndGetCPU, std::memory_order_relaxed);
  return old_base;
}

bool VDSOSupport::LookupSymbol(const char *name,
                               const char *version,
                               int type,
                               SymbolInfo *info) const {
  return image_.LookupSymbol(name, version, type, info);
}

bool VDSOSupport::LookupSymbolByAddress(const void *address,
                                        SymbolInfo *info_out) const {
  return image_.LookupSymbolByAddress(address, info_out);
}

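LookupSymbolByAddress() goes in the other direction: given an address that falls inside the vDSO, it recovers the symbol containing it. A hypothetical sketch follows; the SymbolInfo field names (name, version) are assumed from elf_mem_image.h rather than shown in this file, and PrintVdsoSymbolFor is an invented helper.

// Hypothetical sketch -- maps an address back to a vDSO symbol name.
#include <cstdio>

#include "absl/debugging/internal/vdso_support.h"

#ifdef ABSL_HAVE_VDSO_SUPPORT
void PrintVdsoSymbolFor(const void *pc) {
  absl::debugging_internal::VDSOSupport vdso;
  absl::debugging_internal::VDSOSupport::SymbolInfo info;
  if (vdso.LookupSymbolByAddress(pc, &info)) {
    // name/version are assumed to be the NUL-terminated strings that
    // ElfMemImage fills in for the matching dynamic symbol.
    std::printf("%p is %s@%s\n", pc, info.name, info.version);
  } else {
    std::printf("%p is not inside the vDSO\n", pc);
  }
}
#endif  // ABSL_HAVE_VDSO_SUPPORT
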
// NOLINT on 'long' because this routine mimics kernel api.
long VDSOSupport::GetCPUViaSyscall(unsigned *cpu,  // NOLINT(runtime/int)
                                   void *, void *) {
#ifdef SYS_getcpu
  return syscall(SYS_getcpu, cpu, nullptr, nullptr);
#else
  // x86_64 never implemented sys_getcpu(), except as a VDSO call.
  static_cast<void>(cpu);  // Avoid an unused argument compiler warning.
  errno = ENOSYS;
  return -1;
#endif
}

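For comparison with the fallback above, the raw system call can be invoked directly. A hypothetical, self-contained sketch (Linux only): getcpu(2) also reports the NUMA node, and its third argument is an unused cache pointer; on targets without SYS_getcpu the program simply reports that the call is unavailable.

// Hypothetical sketch of the slow path: getcpu(2) via syscall(2).
#include <sys/syscall.h>
#include <unistd.h>

#include <cstdio>

int main() {
#ifdef SYS_getcpu
  unsigned cpu = 0, node = 0;
  if (syscall(SYS_getcpu, &cpu, &node, nullptr) == 0) {
    std::printf("running on cpu %u, numa node %u\n", cpu, node);
    return 0;
  }
#endif
  std::printf("SYS_getcpu unavailable\n");
  return 1;
}
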
// Use fast __vdso_getcpu if available.
long VDSOSupport::InitAndGetCPU(unsigned *cpu,  // NOLINT(runtime/int)
                                void *x, void *y) {
  Init();
  GetCpuFn fn = getcpu_fn_.load(std::memory_order_relaxed);
  ABSL_RAW_CHECK(fn != &InitAndGetCPU, "Init() did not set getcpu_fn_");
  return (*fn)(cpu, x, y);
}

// This function must be very fast, and may be called from very
// low level (e.g. tcmalloc). Hence I avoid things like
// GoogleOnceInit() and ::operator new.
// The destination in VDSO is unknown to CFI and VDSO does not set MSAN
// shadow for the return value.
ABSL_ATTRIBUTE_NO_SANITIZE_CFI
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
int GetCPU() {
  unsigned cpu;
  long ret_code =  // NOLINT(runtime/int)
      (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr);
  return ret_code == 0 ? static_cast<int>(cpu) : static_cast<int>(ret_code);
}

}  // namespace debugging_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_HAVE_VDSO_SUPPORT
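
Finally, a hypothetical caller of the free function GetCPU() defined above. It is assumed that GetCPU() is declared in vdso_support.h alongside the class; note that it lives in absl::debugging_internal and is therefore an internal API rather than a supported public one. The sketch targets Linux with ABSL_HAVE_VDSO_SUPPORT defined.

// Hypothetical caller of absl::debugging_internal::GetCPU().
#include <cstdio>

#include "absl/debugging/internal/vdso_support.h"

int main() {
#ifdef ABSL_HAVE_VDSO_SUPPORT
  // The first call goes through InitAndGetCPU(); later calls use the cached
  // __vdso_getcpu pointer (or the syscall fallback).
  int cpu = absl::debugging_internal::GetCPU();
  if (cpu >= 0) {
    std::printf("current cpu: %d\n", cpu);
  } else {
    std::printf("GetCPU() failed: %d\n", cpu);
  }
#endif
  return 0;
}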