Coverage Report

Created: 2024-01-17 10:31

/src/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
Line
Count
Source (jump to first uncovered line)
1
//===--- Cuda.cpp - Cuda Tool and ToolChain Implementations -----*- C++ -*-===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
9
#include "Cuda.h"
10
#include "CommonArgs.h"
11
#include "clang/Basic/Cuda.h"
12
#include "clang/Config/config.h"
13
#include "clang/Driver/Compilation.h"
14
#include "clang/Driver/Distro.h"
15
#include "clang/Driver/Driver.h"
16
#include "clang/Driver/DriverDiagnostic.h"
17
#include "clang/Driver/InputInfo.h"
18
#include "clang/Driver/Options.h"
19
#include "llvm/ADT/StringExtras.h"
20
#include "llvm/Option/ArgList.h"
21
#include "llvm/Support/FileSystem.h"
22
#include "llvm/Support/FormatAdapters.h"
23
#include "llvm/Support/FormatVariadic.h"
24
#include "llvm/Support/Path.h"
25
#include "llvm/Support/Process.h"
26
#include "llvm/Support/Program.h"
27
#include "llvm/Support/VirtualFileSystem.h"
28
#include "llvm/TargetParser/Host.h"
29
#include "llvm/TargetParser/TargetParser.h"
30
#include <system_error>
31
32
using namespace clang::driver;
33
using namespace clang::driver::toolchains;
34
using namespace clang::driver::tools;
35
using namespace clang;
36
using namespace llvm::opt;
37
38
namespace {
39
40
0
// Map the raw CUDA_VERSION value from cuda.h (major * 1000 + minor * 10) to
// the corresponding CudaVersion enumerator. Raw values newer than the newest
// known release map to CudaVersion::NEW.
CudaVersion getCudaVersion(uint32_t raw_version) {
  // Each entry pairs an exclusive upper bound on the raw version with the
  // CudaVersion it denotes; entries are sorted ascending, so the first bound
  // the raw version falls under is the answer.
  static const struct {
    uint32_t UpperBound;
    CudaVersion Version;
  } VersionTable[] = {
      {7050, CudaVersion::CUDA_70},   {8000, CudaVersion::CUDA_75},
      {9000, CudaVersion::CUDA_80},   {9010, CudaVersion::CUDA_90},
      {9020, CudaVersion::CUDA_91},   {10000, CudaVersion::CUDA_92},
      {10010, CudaVersion::CUDA_100}, {10020, CudaVersion::CUDA_101},
      {11000, CudaVersion::CUDA_102}, {11010, CudaVersion::CUDA_110},
      {11020, CudaVersion::CUDA_111}, {11030, CudaVersion::CUDA_112},
      {11040, CudaVersion::CUDA_113}, {11050, CudaVersion::CUDA_114},
      {11060, CudaVersion::CUDA_115}, {11070, CudaVersion::CUDA_116},
      {11080, CudaVersion::CUDA_117}, {11090, CudaVersion::CUDA_118},
      {12010, CudaVersion::CUDA_120}, {12020, CudaVersion::CUDA_121},
      {12030, CudaVersion::CUDA_122}, {12040, CudaVersion::CUDA_123},
  };
  for (const auto &Entry : VersionTable)
    if (raw_version < Entry.UpperBound)
      return Entry.Version;
  return CudaVersion::NEW;
}
87
88
0
// Scan the contents of cuda.h for a "#define CUDA_VERSION <N>" line and map
// the macro value to a CudaVersion. Returns CudaVersion::UNKNOWN when no
// parseable definition is found.
CudaVersion parseCudaHFile(llvm::StringRef Input) {
  // Helper lambda which skips the words if the line starts with them or returns
  // std::nullopt otherwise. Takes the word list by const reference to avoid
  // copying the SmallVector on every call.
  auto StartsWithWords =
      [](llvm::StringRef Line,
         const SmallVector<StringRef, 3> &words) -> std::optional<StringRef> {
    for (StringRef word : words) {
      if (!Line.consume_front(word))
        return {};
      Line = Line.ltrim();
    }
    return Line;
  };

  Input = Input.ltrim();
  while (!Input.empty()) {
    if (auto Line =
            StartsWithWords(Input.ltrim(), {"#", "define", "CUDA_VERSION"})) {
      // If the macro's value is not a decimal integer, report UNKNOWN rather
      // than acting on an uninitialized value (consumeInteger returns true on
      // failure and leaves RawVersion untouched).
      uint32_t RawVersion = 0;
      if (Line->consumeInteger(10, RawVersion))
        return CudaVersion::UNKNOWN;
      return getCudaVersion(RawVersion);
    }
    // Find next non-empty line. Guard against a final line with no trailing
    // newline: find_first_of would return npos and drop_front(npos) asserts
    // (or overflows StringRef's bounds in release builds).
    size_t EOL = Input.find_first_of("\n\r");
    if (EOL == StringRef::npos)
      break;
    Input = Input.drop_front(EOL).ltrim();
  }
  return CudaVersion::UNKNOWN;
}
115
} // namespace
116
117
0
// Emit a driver warning when the detected CUDA installation is newer than
// what clang supports: versions above PARTIALLY_SUPPORTED get the "new
// version" warning, while versions between FULLY_SUPPORTED and
// PARTIALLY_SUPPORTED get the "partially supported" warning.
void CudaInstallationDetector::WarnIfUnsupportedVersion() {
  if (Version > CudaVersion::PARTIALLY_SUPPORTED) {
    std::string VersionString = CudaVersionToString(Version);
    if (!VersionString.empty())
      // Prepend a space so the version reads naturally inside the diagnostic
      // text.
      VersionString.insert(0, " ");
    D.Diag(diag::warn_drv_new_cuda_version)
        << VersionString
        // Selects the diagnostic wording depending on whether a "partial
        // support" range exists at all.
        << (CudaVersion::PARTIALLY_SUPPORTED != CudaVersion::FULLY_SUPPORTED)
        << CudaVersionToString(CudaVersion::PARTIALLY_SUPPORTED);
  } else if (Version > CudaVersion::FULLY_SUPPORTED)
    D.Diag(diag::warn_drv_partially_supported_cuda_version)
        << CudaVersionToString(Version);
}
130
131
// Probe the filesystem for a usable CUDA installation. Candidates come, in
// priority order, from --cuda-path, the default Windows install location,
// the directory containing a discovered 'ptxas' binary, /usr/local/cuda and
// /usr/local/cuda-<ver>, and the Debian/Ubuntu nvidia-cuda-toolkit path.
// The first candidate with the expected layout (bin/, include/, and usually
// nvvm/libdevice/) is accepted: its version is parsed from include/cuda.h
// and its libdevice bitcode files are catalogued per GPU arch in
// LibDeviceMap. On success IsValid is set; otherwise the detector stays
// invalid.
CudaInstallationDetector::CudaInstallationDetector(
    const Driver &D, const llvm::Triple &HostTriple,
    const llvm::opt::ArgList &Args)
    : D(D) {
  // A candidate install directory. StrictChecking forces the libdevice
  // directory check even when -nogpulib is given (used for paths guessed
  // from the ptxas location, which are less trustworthy).
  struct Candidate {
    std::string Path;
    bool StrictChecking;

    Candidate(std::string Path, bool StrictChecking = false)
        : Path(Path), StrictChecking(StrictChecking) {}
  };
  SmallVector<Candidate, 4> Candidates;

  // In decreasing order so we prefer newer versions to older versions.
  std::initializer_list<const char *> Versions = {"8.0", "7.5", "7.0"};
  auto &FS = D.getVFS();

  if (Args.hasArg(clang::driver::options::OPT_cuda_path_EQ)) {
    // An explicit --cuda-path is the only candidate considered.
    Candidates.emplace_back(
        Args.getLastArgValue(clang::driver::options::OPT_cuda_path_EQ).str());
  } else if (HostTriple.isOSWindows()) {
    for (const char *Ver : Versions)
      Candidates.emplace_back(
          D.SysRoot + "/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v" +
          Ver);
  } else {
    if (!Args.hasArg(clang::driver::options::OPT_cuda_path_ignore_env)) {
      // Try to find ptxas binary. If the executable is located in a directory
      // called 'bin/', its parent directory might be a good guess for a valid
      // CUDA installation.
      // However, some distributions might installs 'ptxas' to /usr/bin. In that
      // case the candidate would be '/usr' which passes the following checks
      // because '/usr/include' exists as well. To avoid this case, we always
      // check for the directory potentially containing files for libdevice,
      // even if the user passes -nocudalib.
      if (llvm::ErrorOr<std::string> ptxas =
              llvm::sys::findProgramByName("ptxas")) {
        SmallString<256> ptxasAbsolutePath;
        llvm::sys::fs::real_path(*ptxas, ptxasAbsolutePath);

        StringRef ptxasDir = llvm::sys::path::parent_path(ptxasAbsolutePath);
        if (llvm::sys::path::filename(ptxasDir) == "bin")
          Candidates.emplace_back(
              std::string(llvm::sys::path::parent_path(ptxasDir)),
              /*StrictChecking=*/true);
      }
    }

    Candidates.emplace_back(D.SysRoot + "/usr/local/cuda");
    for (const char *Ver : Versions)
      Candidates.emplace_back(D.SysRoot + "/usr/local/cuda-" + Ver);

    Distro Dist(FS, llvm::Triple(llvm::sys::getProcessTriple()));
    if (Dist.IsDebian() || Dist.IsUbuntu())
      // Special case for Debian to have nvidia-cuda-toolkit work
      // out of the box. More info on http://bugs.debian.org/882505
      Candidates.emplace_back(D.SysRoot + "/usr/lib/cuda");
  }

  bool NoCudaLib = Args.hasArg(options::OPT_nogpulib);

  // Accept the first candidate that passes all layout checks.
  for (const auto &Candidate : Candidates) {
    InstallPath = Candidate.Path;
    if (InstallPath.empty() || !FS.exists(InstallPath))
      continue;

    BinPath = InstallPath + "/bin";
    IncludePath = InstallPath + "/include";
    LibDevicePath = InstallPath + "/nvvm/libdevice";

    if (!(FS.exists(IncludePath) && FS.exists(BinPath)))
      continue;
    // With -nogpulib the libdevice directory is optional, unless this
    // candidate was only guessed from the ptxas location.
    bool CheckLibDevice = (!NoCudaLib || Candidate.StrictChecking);
    if (CheckLibDevice && !FS.exists(LibDevicePath))
      continue;

    Version = CudaVersion::UNKNOWN;
    if (auto CudaHFile = FS.getBufferForFile(InstallPath + "/include/cuda.h"))
      Version = parseCudaHFile((*CudaHFile)->getBuffer());
    // As the last resort, make an educated guess between CUDA-7.0, which had
    // old-style libdevice bitcode, and an unknown recent CUDA version.
    if (Version == CudaVersion::UNKNOWN) {
      Version = FS.exists(LibDevicePath + "/libdevice.10.bc")
                    ? CudaVersion::NEW
                    : CudaVersion::CUDA_70;
    }

    if (Version >= CudaVersion::CUDA_90) {
      // CUDA-9+ uses single libdevice file for all GPU variants.
      std::string FilePath = LibDevicePath + "/libdevice.10.bc";
      if (FS.exists(FilePath)) {
        // Map every known NVIDIA arch to the single shared bitcode file.
        for (int Arch = (int)CudaArch::SM_30, E = (int)CudaArch::LAST; Arch < E;
             ++Arch) {
          CudaArch GpuArch = static_cast<CudaArch>(Arch);
          if (!IsNVIDIAGpuArch(GpuArch))
            continue;
          std::string GpuArchName(CudaArchToString(GpuArch));
          LibDeviceMap[GpuArchName] = FilePath;
        }
      }
    } else {
      // Pre-9.0: one libdevice file per compute capability; scan the
      // directory and build the map from file names.
      std::error_code EC;
      for (llvm::vfs::directory_iterator LI = FS.dir_begin(LibDevicePath, EC),
                                         LE;
           !EC && LI != LE; LI = LI.increment(EC)) {
        StringRef FilePath = LI->path();
        StringRef FileName = llvm::sys::path::filename(FilePath);
        // Process all bitcode filenames that look like
        // libdevice.compute_XX.YY.bc
        const StringRef LibDeviceName = "libdevice.";
        if (!(FileName.starts_with(LibDeviceName) && FileName.ends_with(".bc")))
          continue;
        StringRef GpuArch = FileName.slice(
            LibDeviceName.size(), FileName.find('.', LibDeviceName.size()));
        LibDeviceMap[GpuArch] = FilePath.str();
        // Insert map entries for specific devices with this compute
        // capability. NVCC's choice of the libdevice library version is
        // rather peculiar and depends on the CUDA version.
        if (GpuArch == "compute_20") {
          LibDeviceMap["sm_20"] = std::string(FilePath);
          LibDeviceMap["sm_21"] = std::string(FilePath);
          LibDeviceMap["sm_32"] = std::string(FilePath);
        } else if (GpuArch == "compute_30") {
          LibDeviceMap["sm_30"] = std::string(FilePath);
          if (Version < CudaVersion::CUDA_80) {
            LibDeviceMap["sm_50"] = std::string(FilePath);
            LibDeviceMap["sm_52"] = std::string(FilePath);
            LibDeviceMap["sm_53"] = std::string(FilePath);
          }
          LibDeviceMap["sm_60"] = std::string(FilePath);
          LibDeviceMap["sm_61"] = std::string(FilePath);
          LibDeviceMap["sm_62"] = std::string(FilePath);
        } else if (GpuArch == "compute_35") {
          LibDeviceMap["sm_35"] = std::string(FilePath);
          LibDeviceMap["sm_37"] = std::string(FilePath);
        } else if (GpuArch == "compute_50") {
          if (Version >= CudaVersion::CUDA_80) {
            LibDeviceMap["sm_50"] = std::string(FilePath);
            LibDeviceMap["sm_52"] = std::string(FilePath);
            LibDeviceMap["sm_53"] = std::string(FilePath);
          }
        }
      }
    }

    // Check that we have found at least one libdevice that we can link in if
    // -nocudalib hasn't been specified.
    if (LibDeviceMap.empty() && !NoCudaLib)
      continue;

    IsValid = true;
    break;
  }
}
285
286
// Add CUDA-related include arguments to the -cc1 command line: clang's
// cuda_wrappers resource directory (unless -nobuiltininc) and a forced
// include of the CUDA runtime wrapper header (unless -nogpuinc). Emits an
// error when CUDA headers are needed but no valid installation was detected.
void CudaInstallationDetector::AddCudaIncludeArgs(
    const ArgList &DriverArgs, ArgStringList &CC1Args) const {
  if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
    // Add cuda_wrappers/* to our system include path.  This lets us wrap
    // standard library headers.
    SmallString<128> P(D.ResourceDir);
    llvm::sys::path::append(P, "include");
    llvm::sys::path::append(P, "cuda_wrappers");
    CC1Args.push_back("-internal-isystem");
    CC1Args.push_back(DriverArgs.MakeArgString(P));
  }

  // -nogpuinc suppresses all device-side includes beyond the wrappers above.
  if (DriverArgs.hasArg(options::OPT_nogpuinc))
    return;

  if (!isValid()) {
    D.Diag(diag::err_drv_no_cuda_installation);
    return;
  }

  // Force-include the wrapper header that sets up clang's CUDA compilation
  // environment before any user code is seen.
  CC1Args.push_back("-include");
  CC1Args.push_back("__clang_cuda_runtime_wrapper.h");
}
309
310
// Diagnose (once per architecture) when the detected CUDA version falls
// outside the [MinVersion, MaxVersion] range that supports the requested GPU
// arch. ArchsWithBadVersion records arches already reported so repeated jobs
// for the same arch do not spam the error.
void CudaInstallationDetector::CheckCudaVersionSupportsArch(
    CudaArch Arch) const {
  if (Arch == CudaArch::UNKNOWN || Version == CudaVersion::UNKNOWN ||
      ArchsWithBadVersion[(int)Arch])
    return;

  auto MinVersion = MinVersionForCudaArch(Arch);
  auto MaxVersion = MaxVersionForCudaArch(Arch);
  if (Version < MinVersion || Version > MaxVersion) {
    // Remember we already diagnosed this arch.
    ArchsWithBadVersion[(int)Arch] = true;
    D.Diag(diag::err_drv_cuda_version_unsupported)
        << CudaArchToString(Arch) << CudaVersionToString(MinVersion)
        << CudaVersionToString(MaxVersion) << InstallPath
        << CudaVersionToString(Version);
  }
}
326
327
0
// Print the detected CUDA installation (path and version), e.g. for -v
// output. Prints nothing when no valid installation was found.
void CudaInstallationDetector::print(raw_ostream &OS) const {
  if (!isValid())
    return;
  OS << "Found CUDA installation: " << InstallPath << ", version "
     << CudaVersionToString(Version) << "\n";
}
332
333
namespace {
/// Debug info level for the NVPTX devices. We may need to emit a different
/// debug info level for the host and for the device itself. This type controls
/// emission of the debug info for the devices. It either disables debug info
/// emission completely, or emits debug directives only, or emits the same
/// debug info as for the host.
enum DeviceDebugInfoLevel {
  DisableDebugInfo,        ///< Do not emit debug info for the devices.
  DebugDirectivesOnly,     ///< Emit only debug directives.
  EmitSameDebugInfoAsHost, ///< Use the same debug info level just like for the
                           ///< host.
};
} // anonymous namespace
346
347
/// Define debug info level for the NVPTX devices. If the debug info for both
348
/// the host and device are disabled (-g0/-ggdb0 or no debug options at all). If
349
/// only debug directives are requested for the both host and device
350
/// (-gline-directives-only), or the debug info only for the device is disabled
351
/// (optimization is on and --cuda-noopt-device-debug was not specified), the
352
/// debug directives only must be emitted for the device. Otherwise, use the same
353
/// debug info level just like for the host (with the limitations of only
354
/// supported DWARF2 standard).
355
0
static DeviceDebugInfoLevel mustEmitDebugInfo(const ArgList &Args) {
  // Device debug info is only considered "enabled" when optimization is off
  // (-O0 or no -O at all) or the user explicitly requested it with
  // --cuda-noopt-device-debug.
  const Arg *A = Args.getLastArg(options::OPT_O_Group);
  bool IsDebugEnabled = !A || A->getOption().matches(options::OPT_O0) ||
                        Args.hasFlag(options::OPT_cuda_noopt_device_debug,
                                     options::OPT_no_cuda_noopt_device_debug,
                                     /*Default=*/false);
  if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
    const Option &Opt = A->getOption();
    if (Opt.matches(options::OPT_gN_Group)) {
      // -g0 / -ggdb0 explicitly disable debug info everywhere.
      if (Opt.matches(options::OPT_g0) || Opt.matches(options::OPT_ggdb0))
        return DisableDebugInfo;
      if (Opt.matches(options::OPT_gline_directives_only))
        return DebugDirectivesOnly;
    }
    // Some other -g option was given: emit full device debug info only when
    // device optimization is off, otherwise degrade to directives only.
    return IsDebugEnabled ? EmitSameDebugInfoAsHost : DebugDirectivesOnly;
  }
  // No -g option at all: emit directives only if remarks need them.
  return willEmitRemarks(Args) ? DebugDirectivesOnly : DisableDebugInfo;
}
373
374
// Build and register the 'ptxas' command that assembles a PTX input into a
// cubin/object file: resolves the target GPU arch, maps the driver's -O/-g
// options onto ptxas flags, forwards -Xcuda-ptxas options, and decides
// whether to produce relocatable output.
void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
                                    const InputInfo &Output,
                                    const InputInfoList &Inputs,
                                    const ArgList &Args,
                                    const char *LinkingOutput) const {
  const auto &TC =
      static_cast<const toolchains::NVPTXToolChain &>(getToolChain());
  assert(TC.getTriple().isNVPTX() && "Wrong platform");

  StringRef GPUArchName;
  // If this is a CUDA action we need to extract the device architecture
  // from the Job's associated architecture, otherwise use the -march=arch
  // option. This option may come from -Xopenmp-target flag or the default
  // value.
  if (JA.isDeviceOffloading(Action::OFK_Cuda)) {
    GPUArchName = JA.getOffloadingArch();
  } else {
    GPUArchName = Args.getLastArgValue(options::OPT_march_EQ);
    assert(!GPUArchName.empty() && "Must have an architecture passed in.");
  }

  // Obtain architecture from the action.
  CudaArch gpu_arch = StringToCudaArch(GPUArchName);
  assert(gpu_arch != CudaArch::UNKNOWN &&
         "Device action expected to have an architecture.");

  // Check that our installation's ptxas supports gpu_arch.
  if (!Args.hasArg(options::OPT_no_cuda_version_check)) {
    TC.CudaInstallation.CheckCudaVersionSupportsArch(gpu_arch);
  }

  ArgStringList CmdArgs;
  CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-m64" : "-m32");
  DeviceDebugInfoLevel DIKind = mustEmitDebugInfo(Args);
  if (DIKind == EmitSameDebugInfoAsHost) {
    // ptxas does not accept -g option if optimization is enabled, so
    // we ignore the compiler's -O* options if we want debug info.
    CmdArgs.push_back("-g");
    CmdArgs.push_back("--dont-merge-basicblocks");
    CmdArgs.push_back("--return-at-end");
  } else if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
    // Map the -O we received to -O{0,1,2,3}.
    //
    // TODO: Perhaps we should map host -O2 to ptxas -O3. -O3 is ptxas's
    // default, so it may correspond more closely to the spirit of clang -O2.

    // -O3 seems like the least-bad option when -Osomething is specified to
    // clang but it isn't handled below.
    StringRef OOpt = "3";
    if (A->getOption().matches(options::OPT_O4) ||
        A->getOption().matches(options::OPT_Ofast))
      OOpt = "3";
    else if (A->getOption().matches(options::OPT_O0))
      OOpt = "0";
    else if (A->getOption().matches(options::OPT_O)) {
      // -Os, -Oz, and -O(anything else) map to -O2, for lack of better options.
      OOpt = llvm::StringSwitch<const char *>(A->getValue())
                 .Case("1", "1")
                 .Case("2", "2")
                 .Case("3", "3")
                 .Case("s", "2")
                 .Case("z", "2")
                 .Default("2");
    }
    CmdArgs.push_back(Args.MakeArgString(llvm::Twine("-O") + OOpt));
  } else {
    // If no -O was passed, pass -O0 to ptxas -- no opt flag should correspond
    // to no optimizations, but ptxas's default is -O3.
    CmdArgs.push_back("-O0");
  }
  if (DIKind == DebugDirectivesOnly)
    CmdArgs.push_back("-lineinfo");

  // Pass -v to ptxas if it was passed to the driver.
  if (Args.hasArg(options::OPT_v))
    CmdArgs.push_back("-v");

  CmdArgs.push_back("--gpu-name");
  CmdArgs.push_back(Args.MakeArgString(CudaArchToString(gpu_arch)));
  CmdArgs.push_back("--output-file");
  std::string OutputFileName = TC.getInputFilename(Output);

  // If we are invoking `nvlink` internally we need to output a `.cubin` file.
  // FIXME: This should hopefully be removed if NVIDIA updates their tooling.
  if (!C.getInputArgs().getLastArg(options::OPT_c)) {
    SmallString<256> Filename(Output.getFilename());
    llvm::sys::path::replace_extension(Filename, "cubin");
    OutputFileName = Filename.str();
  }
  // When the renamed output differs from the declared output, register it as
  // a temp file so the driver cleans it up.
  if (Output.isFilename() && OutputFileName != Output.getFilename())
    C.addTempFile(Args.MakeArgString(OutputFileName));

  CmdArgs.push_back(Args.MakeArgString(OutputFileName));
  for (const auto &II : Inputs)
    CmdArgs.push_back(Args.MakeArgString(II.getFilename()));

  // Forward -Xcuda-ptxas options to ptxas verbatim.
  for (const auto &A : Args.getAllArgValues(options::OPT_Xcuda_ptxas))
    CmdArgs.push_back(Args.MakeArgString(A));

  bool Relocatable;
  if (JA.isOffloading(Action::OFK_OpenMP))
    // In OpenMP we need to generate relocatable code.
    Relocatable = Args.hasFlag(options::OPT_fopenmp_relocatable_target,
                               options::OPT_fnoopenmp_relocatable_target,
                               /*Default=*/true);
  else if (JA.isOffloading(Action::OFK_Cuda))
    // In CUDA we generate relocatable code by default.
    Relocatable = Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
                               /*Default=*/false);
  else
    // Otherwise, we are compiling directly and should create linkable output.
    Relocatable = true;

  if (Relocatable)
    CmdArgs.push_back("-c");

  const char *Exec;
  if (Arg *A = Args.getLastArg(options::OPT_ptxas_path_EQ))
    Exec = A->getValue();
  else
    Exec = Args.MakeArgString(TC.GetProgramPath("ptxas"));
  C.addCommand(std::make_unique<Command>(
      JA, *this,
      ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
                          "--options-file"},
      Exec, CmdArgs, Inputs, Output));
}
501
502
0
static bool shouldIncludePTX(const ArgList &Args, const char *gpu_arch) {
503
0
  bool includePTX = true;
504
0
  for (Arg *A : Args) {
505
0
    if (!(A->getOption().matches(options::OPT_cuda_include_ptx_EQ) ||
506
0
          A->getOption().matches(options::OPT_no_cuda_include_ptx_EQ)))
507
0
      continue;
508
0
    A->claim();
509
0
    const StringRef ArchStr = A->getValue();
510
0
    if (ArchStr == "all" || ArchStr == gpu_arch) {
511
0
      includePTX = A->getOption().matches(options::OPT_cuda_include_ptx_EQ);
512
0
      continue;
513
0
    }
514
0
  }
515
0
  return includePTX;
516
0
}
517
518
// All inputs to this linker must be from CudaDeviceActions, as we need to look
// at the Inputs' Actions in order to figure out which GPU architecture they
// correspond to.
//
// Builds and registers the 'fatbinary' command that bundles per-arch cubin
// and PTX images into a single fat binary.
void NVPTX::FatBinary::ConstructJob(Compilation &C, const JobAction &JA,
                                    const InputInfo &Output,
                                    const InputInfoList &Inputs,
                                    const ArgList &Args,
                                    const char *LinkingOutput) const {
  const auto &TC =
      static_cast<const toolchains::CudaToolChain &>(getToolChain());
  assert(TC.getTriple().isNVPTX() && "Wrong platform");

  ArgStringList CmdArgs;
  // Older (<= 10.0) fatbinary versions require an explicit --cuda flag.
  if (TC.CudaInstallation.version() <= CudaVersion::CUDA_100)
    CmdArgs.push_back("--cuda");
  CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-64" : "-32");
  CmdArgs.push_back(Args.MakeArgString("--create"));
  CmdArgs.push_back(Args.MakeArgString(Output.getFilename()));
  if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
    CmdArgs.push_back("-g");

  for (const auto &II : Inputs) {
    auto *A = II.getAction();
    assert(A->getInputs().size() == 1 &&
           "Device offload action is expected to have a single input");
    const char *gpu_arch_str = A->getOffloadingArch();
    assert(gpu_arch_str &&
           "Device action expected to have associated a GPU architecture!");
    CudaArch gpu_arch = StringToCudaArch(gpu_arch_str);

    // Honor --[no-]cuda-include-ptx for PTX (assembly) inputs.
    if (II.getType() == types::TY_PP_Asm &&
        !shouldIncludePTX(Args, gpu_arch_str))
      continue;
    // We need to pass an Arch of the form "sm_XX" for cubin files and
    // "compute_XX" for ptx.
    const char *Arch = (II.getType() == types::TY_PP_Asm)
                           ? CudaArchToVirtualArchString(gpu_arch)
                           : gpu_arch_str;
    CmdArgs.push_back(
        Args.MakeArgString(llvm::Twine("--image=profile=") + Arch +
                           ",file=" + getToolChain().getInputFilename(II)));
  }

  // Forward -Xcuda-fatbinary options verbatim.
  for (const auto &A : Args.getAllArgValues(options::OPT_Xcuda_fatbinary))
    CmdArgs.push_back(Args.MakeArgString(A));

  const char *Exec = Args.MakeArgString(TC.GetProgramPath("fatbinary"));
  C.addCommand(std::make_unique<Command>(
      JA, *this,
      ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
                          "--options-file"},
      Exec, CmdArgs, Inputs, Output));
}
571
572
// Build and register the 'nvlink' command that performs device linking:
// forwards -g/-v/-arch, adds LIBRARY_PATH and clang's default lib dir as -L
// paths, and renames '.o' inputs to '.cubin' so nvlink performs device
// linking instead of RDC-mode linking.
void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
                                 const InputInfo &Output,
                                 const InputInfoList &Inputs,
                                 const ArgList &Args,
                                 const char *LinkingOutput) const {
  const auto &TC =
      static_cast<const toolchains::NVPTXToolChain &>(getToolChain());
  ArgStringList CmdArgs;

  assert(TC.getTriple().isNVPTX() && "Wrong platform");

  assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
  if (Output.isFilename()) {
    CmdArgs.push_back("-o");
    CmdArgs.push_back(Output.getFilename());
  }

  if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
    CmdArgs.push_back("-g");

  if (Args.hasArg(options::OPT_v))
    CmdArgs.push_back("-v");

  StringRef GPUArch = Args.getLastArgValue(options::OPT_march_EQ);
  assert(!GPUArch.empty() && "At least one GPU Arch required for nvlink.");

  CmdArgs.push_back("-arch");
  CmdArgs.push_back(Args.MakeArgString(GPUArch));

  // Add paths specified in LIBRARY_PATH environment variable as -L options.
  addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");

  // Add paths for the default clang library path.
  SmallString<256> DefaultLibPath =
      llvm::sys::path::parent_path(TC.getDriver().Dir);
  llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
  CmdArgs.push_back(Args.MakeArgString(Twine("-L") + DefaultLibPath));

  for (const auto &II : Inputs) {
    // nvlink cannot consume LLVM IR or bitcode; diagnose instead of passing
    // such inputs through.
    if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
        II.getType() == types::TY_LTO_BC || II.getType() == types::TY_LLVM_BC) {
      C.getDriver().Diag(diag::err_drv_no_linker_llvm_support)
          << getToolChain().getTripleString();
      continue;
    }

    // Currently, we only pass the input files to the linker, we do not pass
    // any libraries that may be valid only for the host.
    if (!II.isFilename())
      continue;

    // The 'nvlink' application performs RDC-mode linking when given a '.o'
    // file and device linking when given a '.cubin' file. We always want to
    // perform device linking, so just rename any '.o' files.
    // FIXME: This should hopefully be removed if NVIDIA updates their tooling.
    auto InputFile = getToolChain().getInputFilename(II);
    if (llvm::sys::path::extension(InputFile) != ".cubin") {
      // If there are no actions above this one then this is direct input and we
      // can copy it. Otherwise the input is internal so a `.cubin` file should
      // exist.
      if (II.getAction() && II.getAction()->getInputs().size() == 0) {
        const char *CubinF =
            Args.MakeArgString(getToolChain().getDriver().GetTemporaryPath(
                llvm::sys::path::stem(InputFile), "cubin"));
        // Best effort: if the copy fails, skip this input silently.
        if (llvm::sys::fs::copy_file(InputFile, C.addTempFile(CubinF)))
          continue;

        CmdArgs.push_back(CubinF);
      } else {
        SmallString<256> Filename(InputFile);
        llvm::sys::path::replace_extension(Filename, "cubin");
        CmdArgs.push_back(Args.MakeArgString(Filename));
      }
    } else {
      CmdArgs.push_back(Args.MakeArgString(InputFile));
    }
  }

  C.addCommand(std::make_unique<Command>(
      JA, *this,
      ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
                          "--options-file"},
      Args.MakeArgString(getToolChain().GetProgramPath("nvlink")), CmdArgs,
      Inputs, Output));
}
657
658
// Compute the PTX-version target feature (e.g. "+ptx75") for the NVPTX
// backend. An explicit --cuda-feature= wins; otherwise the feature is derived
// from the CUDA version of the detected installation, defaulting to +ptx42
// for unknown/old versions.
void NVPTX::getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
                                   const llvm::opt::ArgList &Args,
                                   std::vector<StringRef> &Features) {
  if (Args.hasArg(options::OPT_cuda_feature_EQ)) {
    StringRef PtxFeature =
        Args.getLastArgValue(options::OPT_cuda_feature_EQ, "+ptx42");
    Features.push_back(Args.MakeArgString(PtxFeature));
    return;
  }
  CudaInstallationDetector CudaInstallation(D, Triple, Args);

  // New CUDA versions often introduce new instructions that are only supported
  // by new PTX version, so we need to raise PTX level to enable them in NVPTX
  // back-end.
  const char *PtxFeature = nullptr;
  switch (CudaInstallation.version()) {
// Expands to a case mapping CudaVersion::CUDA_<CUDA_VER> to "+ptx<PTX_VER>".
#define CASE_CUDA_VERSION(CUDA_VER, PTX_VER)                                   \
  case CudaVersion::CUDA_##CUDA_VER:                                           \
    PtxFeature = "+ptx" #PTX_VER;                                              \
    break;
    CASE_CUDA_VERSION(123, 83);
    CASE_CUDA_VERSION(122, 82);
    CASE_CUDA_VERSION(121, 81);
    CASE_CUDA_VERSION(120, 80);
    CASE_CUDA_VERSION(118, 78);
    CASE_CUDA_VERSION(117, 77);
    CASE_CUDA_VERSION(116, 76);
    CASE_CUDA_VERSION(115, 75);
    CASE_CUDA_VERSION(114, 74);
    CASE_CUDA_VERSION(113, 73);
    CASE_CUDA_VERSION(112, 72);
    CASE_CUDA_VERSION(111, 71);
    CASE_CUDA_VERSION(110, 70);
    CASE_CUDA_VERSION(102, 65);
    CASE_CUDA_VERSION(101, 64);
    CASE_CUDA_VERSION(100, 63);
    CASE_CUDA_VERSION(92, 61);
    CASE_CUDA_VERSION(91, 61);
    CASE_CUDA_VERSION(90, 60);
#undef CASE_CUDA_VERSION
  default:
    PtxFeature = "+ptx42";
  }
  Features.push_back(PtxFeature);
}
703
704
/// NVPTX toolchain. Our assembler is ptxas, and our linker is nvlink. This
705
/// operates as a stand-alone version of the NVPTX tools without the host
706
/// toolchain.
707
// Primary constructor: Triple is the NVPTX device target, HostTriple is only
// used to locate the CUDA binary utilities (ptxas, nvlink, ...).  Note the
// default argument on the *definition*: the in-class declaration presumably
// omits it — TODO confirm against the header.
NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
                               const llvm::Triple &HostTriple,
                               const ArgList &Args, bool Freestanding = false)
    : ToolChain(D, Triple, Args), CudaInstallation(D, HostTriple, Args),
      Freestanding(Freestanding) {
  // Prefer binaries shipped with a detected CUDA installation, if any.
  if (CudaInstallation.isValid())
    getProgramPaths().push_back(std::string(CudaInstallation.getBinPath()));
  // Lookup binaries into the driver directory, this is used to
  // discover the 'nvptx-arch' executable.
  getProgramPaths().push_back(getDriver().Dir);
}
718
719
/// We only need the host triple to locate the CUDA binary utilities, use the
720
/// system's default triple if not provided.
721
// Convenience constructor: fall back to the build-time host triple for
// locating CUDA utilities, and mark the toolchain as freestanding (no host
// toolchain backing it).
NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
                               const ArgList &Args)
    : NVPTXToolChain(D, Triple, llvm::Triple(LLVM_HOST_TRIPLE), Args,
                     /*Freestanding=*/true) {}
725
726
/// Translate the driver argument list for the standalone NVPTX toolchain:
/// keep every incoming argument and make sure an -march= is always present,
/// defaulting to the generic CUDA architecture.
llvm::opt::DerivedArgList *
NVPTXToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
                              StringRef BoundArch,
                              Action::OffloadKind DeviceOffloadKind) const {
  // Start from the generic toolchain translation; if it produced nothing,
  // build a fresh list over the same base arguments.
  DerivedArgList *Translated =
      ToolChain::TranslateArgs(Args, BoundArch, DeviceOffloadKind);
  if (!Translated)
    Translated = new DerivedArgList(Args.getBaseArgs());

  // Carry over each incoming argument exactly once.
  for (Arg *A : Args)
    if (!llvm::is_contained(*Translated, A))
      Translated->append(A);

  // Default the GPU architecture when the user did not specify one.
  if (!Translated->hasArg(options::OPT_march_EQ)) {
    const OptTable &Opts = getDriver().getOpts();
    Translated->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
                             CudaArchToString(CudaArch::CudaDefault));
  }

  return Translated;
}
747
748
// Add NVPTX-specific -cc1 options.  Only the freestanding (no host toolchain)
// configuration needs extra flags here.
void NVPTXToolChain::addClangTargetOptions(
    const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
    Action::OffloadKind DeviceOffloadingKind) const {
  // If we are compiling with a standalone NVPTX toolchain we want to try to
  // mimic a standard environment as much as possible. So we enable lowering
  // ctor / dtor functions to global symbols that can be registered.
  if (Freestanding)
    CC1Args.append({"-mllvm", "--nvptx-lower-global-ctor-dtor"});
}
757
758
0
/// Report whether a given debug-info flag is one of the options this
/// toolchain knows how to honor (general -g levels, GDB flavors, DWARF
/// version selection, column info) — everything except module debug info.
bool NVPTXToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
  const Option &O = A->getOption();
  const auto Is = [&O](auto Opt) { return O.matches(Opt); };

  // Any -gN style option is fine except -gmodules.
  if (Is(options::OPT_gN_Group) && !Is(options::OPT_gmodules))
    return true;

  return Is(options::OPT_g_Flag) || Is(options::OPT_ggdbN_Group) ||
         Is(options::OPT_ggdb) || Is(options::OPT_gdwarf) ||
         Is(options::OPT_gdwarf_2) || Is(options::OPT_gdwarf_3) ||
         Is(options::OPT_gdwarf_4) || Is(options::OPT_gdwarf_5) ||
         Is(options::OPT_gcolumn_info);
}
769
770
void NVPTXToolChain::adjustDebugInfoKind(
771
    llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
772
0
    const ArgList &Args) const {
773
0
  switch (mustEmitDebugInfo(Args)) {
774
0
  case DisableDebugInfo:
775
0
    DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
776
0
    break;
777
0
  case DebugDirectivesOnly:
778
0
    DebugInfoKind = llvm::codegenoptions::DebugDirectivesOnly;
779
0
    break;
780
0
  case EmitSameDebugInfoAsHost:
781
    // Use same debug info level as the host.
782
0
    break;
783
0
  }
784
0
}
785
786
/// CUDA toolchain.  Our assembler is ptxas, and our "linker" is fatbinary,
787
/// which isn't properly a linker but nonetheless performs the step of stitching
788
/// together object files from the assembler into a single blob.
789
790
// A CUDA toolchain is an NVPTX toolchain that locates the CUDA utilities via
// the host toolchain's triple and keeps a reference to the host toolchain so
// most queries can be forwarded to it.
CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple,
                             const ToolChain &HostTC, const ArgList &Args)
    : NVPTXToolChain(D, Triple, HostTC.getTriple(), Args), HostTC(HostTC) {}
793
794
// Build the device-side -cc1 option set: forward the host options, add the
// CUDA/OpenMP offloading flags, and link libdevice / the OpenMP device RTL
// bitcode unless the user opted out.  Order matters here: early returns skip
// bitcode linking but not the flags appended before them.
void CudaToolChain::addClangTargetOptions(
    const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
    Action::OffloadKind DeviceOffloadingKind) const {
  HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);

  StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
  assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
  assert((DeviceOffloadingKind == Action::OFK_OpenMP ||
          DeviceOffloadingKind == Action::OFK_Cuda) &&
         "Only OpenMP or CUDA offloading kinds are supported for NVIDIA GPUs.");

  if (DeviceOffloadingKind == Action::OFK_Cuda) {
    CC1Args.append(
        {"-fcuda-is-device", "-mllvm", "-enable-memcpyopt-without-libcalls"});

    // Unsized function arguments used for variadics were introduced in CUDA-9.0
    // We still do not support generating code that actually uses variadic
    // arguments yet, but we do need to allow parsing them as recent CUDA
    // headers rely on that. https://github.com/llvm/llvm-project/issues/58410
    if (CudaInstallation.version() >= CudaVersion::CUDA_90)
      CC1Args.push_back("-fcuda-allow-variadic-functions");
  }

  // -nogpulib: the user does not want any device bitcode linked in.
  if (DriverArgs.hasArg(options::OPT_nogpulib))
    return;

  // For OpenMP -S (assembly-only) output, skip device bitcode as well.
  if (DeviceOffloadingKind == Action::OFK_OpenMP &&
      DriverArgs.hasArg(options::OPT_S))
    return;

  std::string LibDeviceFile = CudaInstallation.getLibDeviceFile(GpuArch);
  if (LibDeviceFile.empty()) {
    getDriver().Diag(diag::err_drv_no_cuda_libdevice) << GpuArch;
    return;
  }

  // Link NVIDIA's libdevice math bitcode for the selected GPU arch.
  CC1Args.push_back("-mlink-builtin-bitcode");
  CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));

  clang::CudaVersion CudaInstallationVersion = CudaInstallation.version();

  if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
                         options::OPT_fno_cuda_short_ptr, false))
    CC1Args.append({"-mllvm", "--nvptx-short-ptr"});

  // NOTE(review): this comparison appears to be always true (UNKNOWN looks
  // like the lowest CudaVersion enumerator), so -target-sdk-version= is
  // presumably always emitted — confirm against the CudaVersion enum.
  if (CudaInstallationVersion >= CudaVersion::UNKNOWN)
    CC1Args.push_back(
        DriverArgs.MakeArgString(Twine("-target-sdk-version=") +
                                 CudaVersionToString(CudaInstallationVersion)));

  if (DeviceOffloadingKind == Action::OFK_OpenMP) {
    // OpenMP offloading requires at least CUDA 9.2.
    if (CudaInstallationVersion < CudaVersion::CUDA_92) {
      getDriver().Diag(
          diag::err_drv_omp_offload_target_cuda_version_not_support)
          << CudaVersionToString(CudaInstallationVersion);
      return;
    }

    // Link the bitcode library late if we're using device LTO.
    if (getDriver().isUsingLTO(/* IsOffload */ true))
      return;

    addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, GpuArch.str(),
                       getTriple());
  }
}
860
861
/// Pick the default denormal handling for device code.  For CUDA device
/// compilations of 'float', -fgpu-flush-denormals-to-zero selects
/// preserve-sign (flush-to-zero) behavior; everything else stays IEEE.
llvm::DenormalMode CudaToolChain::getDefaultDenormalModeForType(
    const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
    const llvm::fltSemantics *FPType) const {
  // Keep the && chain in this order: the flag lookup (which claims the
  // arguments) must only run for CUDA device jobs on IEEE single.
  if (JA.getOffloadingDeviceKind() == Action::OFK_Cuda && FPType &&
      FPType == &llvm::APFloat::IEEEsingle() &&
      DriverArgs.hasFlag(options::OPT_fgpu_flush_denormals_to_zero,
                         options::OPT_fno_gpu_flush_denormals_to_zero, false))
    return llvm::DenormalMode::getPreserveSign();

  assert(JA.getOffloadingDeviceKind() != Action::OFK_Host);
  return llvm::DenormalMode::getIEEE();
}
874
875
void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
876
0
                                       ArgStringList &CC1Args) const {
877
  // Check our CUDA version if we're going to include the CUDA headers.
878
0
  if (!DriverArgs.hasArg(options::OPT_nogpuinc) &&
879
0
      !DriverArgs.hasArg(options::OPT_no_cuda_version_check)) {
880
0
    StringRef Arch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
881
0
    assert(!Arch.empty() && "Must have an explicit GPU arch.");
882
0
    CudaInstallation.CheckCudaVersionSupportsArch(StringToCudaArch(Arch));
883
0
  }
884
0
  CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
885
0
}
886
887
0
std::string CudaToolChain::getInputFilename(const InputInfo &Input) const {
888
  // Only object files are changed, for example assembly files keep their .s
889
  // extensions. If the user requested device-only compilation don't change it.
890
0
  if (Input.getType() != types::TY_Object || getDriver().offloadDeviceOnly())
891
0
    return ToolChain::getInputFilename(Input);
892
893
  // Replace extension for object files with cubin because nvlink relies on
894
  // these particular file names.
895
0
  SmallString<256> Filename(ToolChain::getInputFilename(Input));
896
0
  llvm::sys::path::replace_extension(Filename, "cubin");
897
0
  return std::string(Filename.str());
898
0
}
899
900
// Translate the argument list for a CUDA device compilation.  OpenMP
// offloading gets de-duplicated arguments plus an auto-detected -march=;
// plain CUDA offloading copies all arguments and pins -march= to BoundArch.
llvm::opt::DerivedArgList *
CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
                             StringRef BoundArch,
                             Action::OffloadKind DeviceOffloadKind) const {
  DerivedArgList *DAL =
      HostTC.TranslateArgs(Args, BoundArch, DeviceOffloadKind);
  if (!DAL)
    DAL = new DerivedArgList(Args.getBaseArgs());

  const OptTable &Opts = getDriver().getOpts();

  // For OpenMP device offloading, append derived arguments. Make sure
  // flags are not duplicated.
  // Also append the compute capability.
  if (DeviceOffloadKind == Action::OFK_OpenMP) {
    for (Arg *A : Args)
      if (!llvm::is_contained(*DAL, A))
        DAL->append(A);

    if (!DAL->hasArg(options::OPT_march_EQ)) {
      StringRef Arch = BoundArch;
      if (Arch.empty()) {
        // No arch was bound; probe the system for an installed GPU.  On
        // failure, diagnose and fall back to the default architecture so
        // compilation can still proceed.
        auto ArchsOrErr = getSystemGPUArchs(Args);
        if (!ArchsOrErr) {
          std::string ErrMsg =
              llvm::formatv("{0}", llvm::fmt_consume(ArchsOrErr.takeError()));
          getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
              << llvm::Triple::getArchTypeName(getArch()) << ErrMsg << "-march";
          Arch = CudaArchToString(CudaArch::CudaDefault);
        } else {
          // Use the first detected GPU's architecture.
          Arch = Args.MakeArgString(ArchsOrErr->front());
        }
      }
      DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), Arch);
    }

    return DAL;
  }

  // Non-OpenMP (CUDA) path: copy every argument unconditionally.
  for (Arg *A : Args) {
    DAL->append(A);
  }

  // Replace any user -march= with the architecture bound to this job.
  if (!BoundArch.empty()) {
    DAL->eraseArg(options::OPT_march_EQ);
    DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
                      BoundArch);
  }
  return DAL;
}
950
951
/// Detect the NVIDIA GPUs available on the system by running an external
/// detector tool and parsing one architecture per line of its output.
/// Returns an error if the tool fails or finds no GPU.
Expected<SmallVector<std::string>>
CudaToolChain::getSystemGPUArchs(const ArgList &Args) const {
  // A user-specified tool (--nvptx-arch-tool=) wins over the bundled
  // 'nvptx-arch' executable.
  std::string DetectorPath;
  if (Arg *ToolArg = Args.getLastArg(options::OPT_nvptx_arch_tool_EQ))
    DetectorPath = ToolArg->getValue();
  else
    DetectorPath = GetProgramPath("nvptx-arch");

  auto OutputOrErr = executeToolChainProgram(DetectorPath);
  if (!OutputOrErr)
    return OutputOrErr.takeError();

  SmallVector<std::string, 1> Archs;
  for (StringRef Line : llvm::split((*OutputOrErr)->getBuffer(), "\n"))
    if (!Line.empty())
      Archs.push_back(Line.str());

  if (Archs.empty())
    return llvm::createStringError(std::error_code(),
                                   "No NVIDIA GPU detected in the system");

  return std::move(Archs);
}
975
976
0
// The NVPTX "assembler" wraps ptxas (PTX -> SASS).
Tool *NVPTXToolChain::buildAssembler() const {
  return new tools::NVPTX::Assembler(*this);
}
979
980
0
// The standalone NVPTX "linker" wraps nvlink.
Tool *NVPTXToolChain::buildLinker() const {
  return new tools::NVPTX::Linker(*this);
}
983
984
0
// CUDA offloading uses the same ptxas-based assembler as standalone NVPTX.
Tool *CudaToolChain::buildAssembler() const {
  return new tools::NVPTX::Assembler(*this);
}
987
988
0
// For CUDA the "linker" is fatbinary, which stitches per-arch objects into a
// single fat binary blob rather than performing a real link.
Tool *CudaToolChain::buildLinker() const {
  return new tools::NVPTX::FatBinary(*this);
}
991
992
0
// Warning options mirror the host toolchain so both sides of the
// compilation see a consistent set of diagnostics flags.
void CudaToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
  HostTC.addClangWarningOptions(CC1Args);
}
995
996
// Device code uses the same C++ standard library flavor as the host.
ToolChain::CXXStdlibType
CudaToolChain::GetCXXStdlibType(const ArgList &Args) const {
  return HostTC.GetCXXStdlibType(Args);
}
1000
1001
// System include paths: start with the host's, then append the CUDA
// installation's include directory unless -nogpuinc was given or no valid
// installation was found.
void CudaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
                                              ArgStringList &CC1Args) const {
  HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);

  if (!DriverArgs.hasArg(options::OPT_nogpuinc) && CudaInstallation.isValid())
    CC1Args.append(
        {"-internal-isystem",
         DriverArgs.MakeArgString(CudaInstallation.getIncludePath())});
}
1010
1011
// C++ standard library headers come from the host toolchain.
void CudaToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &Args,
                                                 ArgStringList &CC1Args) const {
  HostTC.AddClangCXXStdlibIncludeArgs(Args, CC1Args);
}
1015
1016
// IAMCU include handling is delegated to the host toolchain as well.
void CudaToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
                                        ArgStringList &CC1Args) const {
  HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
}
1020
1021
0
SanitizerMask CudaToolChain::getSupportedSanitizers() const {
  // The CudaToolChain only supports sanitizers in the sense that it allows
  // sanitizer arguments on the command line if they are supported by the host
  // toolchain. The CudaToolChain will actually ignore any command line
  // arguments for any of these "supported" sanitizers. That means that no
  // sanitization of device code is actually supported at this time.
  //
  // This behavior is necessary because the host and device toolchains
  // invocations often share the command line, so the device toolchain must
  // tolerate flags meant only for the host toolchain.
  //
  // In short: report the host's mask so the driver does not reject the
  // host-side sanitizer flags during the device compilation.
  return HostTC.getSupportedSanitizers();
}
1033
1034
// On Windows hosts, the MSVC compatibility version is whatever the host
// toolchain computed.
VersionTuple CudaToolChain::computeMSVCVersion(const Driver *D,
                                               const ArgList &Args) const {
  return HostTC.computeMSVCVersion(D, Args);
}