/src/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
//
//===----------------------------------------------------------------------===//

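// A quick illustration of the input this analysis consumes (hypothetical user
// code, not part of this file; the macros are from the documentation cited
// above). The analysis warns when a guarded member is accessed without its
// capability held:
//
// \code
//   class Counter {
//     Mutex Mu;
//     int Count GUARDED_BY(Mu);
//
//     void inc() { Mu.Lock(); ++Count; Mu.Unlock(); } // OK: 'Mu' is held.
//     void bad() { ++Count; } // warning: writing variable 'Count' requires
//                             // holding mutex 'Mu' exclusively
//   };
// \endcode
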
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

using namespace clang;
using namespace threadSafety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;

/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  SourceLocation Loc;
  if (DeclExp)
    Loc = DeclExp->getExprLoc();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Loc);
}

namespace {

/// A set of CapabilityExpr objects, which are compiled from thread safety
/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// Push CapE onto the list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
          return CapE.equals(CapE2);
        }))
      push_back(CapE);
  }
};

class FactManager;
class FactSet;

/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
///
/// FIXME: this analysis does not currently support re-entrant locking.
class FactEntry : public CapabilityExpr {
public:
  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed,  ///< The fact has been acquired through a scoped capability.
  };

private:
  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

public:
  FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
            SourceKind Src)
      : CapabilityExpr(CE), LKind(LK), Source(Src), AcquireLoc(Loc) {}
  virtual ~FactEntry() = default;

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry,
                          ThreadSafetyHandler &Handler) const = 0;
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove,
                            ThreadSafetyHandler &Handler) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};

using FactID = unsigned short;

/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  std::vector<std::unique_ptr<const FactEntry>> Facts;

public:
  FactID newFact(std::unique_ptr<FactEntry> Entry) {
    Facts.push_back(std::move(Entry));
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
};

/// A FactSet is the set of facts that are known to be true at a
/// particular program point. FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup. Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
  using FactVec = SmallVector<FactID, 4>;

  FactVec FactIDs;

public:
  using iterator = FactVec::iterator;
  using const_iterator = FactVec::const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (const auto FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
    FactID F = FM.newFact(std::move(Entry));
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager &FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n - 1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n - 1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n - 1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return std::find_if(begin(), end(),
                        [&](FactID ID) { return FM[ID].matches(CapE); });
  }

  const FactEntry *findLock(FactManager &FM,
                            const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(),
                          [&](FactID ID) { return FM[ID].matches(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findLockUniv(FactManager &FM,
                                const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].matchesUniv(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findPartialMatch(FactManager &FM,
                                    const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  bool containsMutexDecl(FactManager &FM, const ValueDecl *Vd) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].valueDecl() == Vd;
    });
    return I != end();
  }
};

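// A sketch of how a FactSet evolves across a function body (hypothetical
// client code; the negative facts such as '!mu2' are recorded by the unlock
// handlers defined later in this file):
//
// \code
//   mu1.Lock();   // FactSet: { mu1 }
//   mu2.Lock();   // FactSet: { mu1, mu2 }
//   mu2.Unlock(); // FactSet: { mu1, !mu2 }
//   mu1.Unlock(); // FactSet: { !mu1, !mu2 }
// \endcode
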
class ThreadSafetyAnalyzer;

} // namespace

namespace clang {
namespace threadSafety {

class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  struct BeforeInfo {
    BeforeVect Vect;
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  BeforeInfo *insertAttrExprs(const ValueDecl *Vd,
                              ThreadSafetyAnalyzer &Analyzer);

  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  void checkBeforeAfter(const ValueDecl *Vd,
                        const FactSet &FSet,
                        ThreadSafetyAnalyzer &Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap CycMap;
};

} // namespace threadSafety
} // namespace clang

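// Illustration (hypothetical user code): BeforeSet implements the checking of
// the acquisition-ordering attributes described in the documentation, e.g.:
//
// \code
//   Mutex Mu1;
//   Mutex Mu2 ACQUIRED_AFTER(Mu1); // 'Mu1' must be acquired before 'Mu2'.
//
//   void f() {
//     Mu2.Lock();
//     Mu1.Lock(); // warning: 'Mu1' must be acquired before 'Mu2'
//     Mu1.Unlock();
//     Mu2.Unlock();
//   }
// \endcode
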
namespace {

class LocalVariableMap;

using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};

// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable
// DeclRefExprs with their SSA names. Instead, we compute a Context for each
// point in the code, which maps local variables to the appropriate SSA name.
// This map changes with each assignment.
//
// The map is computed in a single pass over the CFG. Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted. A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Reference to another VarDefinition
    unsigned Ref = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    bool isReference() const { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
        : Dec(D), Ref(R), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition *lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context. Returns
  /// NULL if the expression is not statically known. If successful, also
  /// modifies Ctx to hold the context of the returned Expr.
  const Expr *lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S. This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex + 1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void *)Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp)
        Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      llvm::errs() << " -> ";
      dumpVarDefinitionName(I.getData());
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size() - 1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};

} // namespace

// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}

namespace {

/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap *VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
};

} // namespace

// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
  bool modifiedCtx = false;
  const DeclGroupRef DGrp = S->getDeclGroup();
  for (const auto *D : DGrp) {
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    const ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}

// Computes the intersection of two contexts. The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &P : C1) {
    const NamedDecl *Dec = P.first;
    const unsigned *i2 = C2.lookup(Dec);
    if (!i2)                  // variable doesn't exist on second path
      Result = removeDefinition(Dec, Result);
    else if (*i2 != P.second) // variable exists, but has different definition
      Result = clearDefinition(Dec, Result);
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C. Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &P : C)
    Result = addReference(P.first, P.second, Result);
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions. C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &P : C1) {
    unsigned i1 = P.second;
    VarDefinition *VDef = &VarDefinitions[i1];
    assert(VDef->isReference());

    const unsigned *i2 = C2.lookup(P.first);
    if (!i2 || (*i2 != i1))
      VDef->Ref = 0; // Mark this variable as undefined
  }
}

// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself. At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, e.g.
//
//                       { Context                 | VarDefinitions }
//   int x = 0;          { x -> x1                 | x1 = 0 }
//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
//   ...                 { y -> y1 (x is unknown)  | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm. Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block. We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (e.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass. Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals. (These correspond to places where SSA
// might have to insert a phi node.) On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.) E.g.
//
//                       { Context           | VarDefinitions }
//   int x = 0, y = 0;   { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
//   while (b)           { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//     x = x+1;          { x -> x3, y -> y1  | x3 = x2 + 1, ... }
//   ...                 { y -> y1           | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If the edge *PI -> CurrBlock is a back edge, skip it.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      } else {
        CurrBlockInfo->EntryContext =
            intersectContexts(CurrBlockInfo->EntryContext,
                              PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
          createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        VMapBuilder.Visit(CS.getStmt());
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // If CurrBlock -> *SI is *not* a back edge, skip it.
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
      // The block is empty, and has a single successor. Use its entry
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
    }
  }
}

namespace {

class LockableFactEntry : public FactEntry {
public:
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src = Acquired)
      : FactEntry(CE, LK, Loc, Src) {}

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
                             entry.loc());
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    FSet.removeLock(FactMan, Cp);
    if (!Cp.negative()) {
      FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }
};

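// Illustration of the handlers above (hypothetical client code):
//
// \code
//   mu.Lock();
//   mu.Lock();   // handleLock: warns about acquiring 'mu' twice, since
//                // re-entrant locking is not yet supported (see FIXME above).
//   mu.Unlock(); // handleUnlock: removes 'mu' and records the negative
//                // fact '!mu'.
// \endcode
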
class ScopedLockableFactEntry : public FactEntry {
private:
  enum UnderlyingCapabilityKind {
    UCK_Acquired,          ///< Any kind of acquired capability.
    UCK_ReleasedShared,    ///< Shared capability that was released.
    UCK_ReleasedExclusive, ///< Exclusive capability that was released.
  };

  struct UnderlyingCapability {
    CapabilityExpr Cap;
    UnderlyingCapabilityKind Kind;
  };

  SmallVector<UnderlyingCapability, 2> UnderlyingMutexes;

public:
  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
      : FactEntry(CE, LK_Exclusive, Loc, Acquired) {}

  void addLock(const CapabilityExpr &M) {
    UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_Acquired});
  }

  void addExclusiveUnlock(const CapabilityExpr &M) {
    UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedExclusive});
  }

  void addSharedUnlock(const CapabilityExpr &M) {
    UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedShared});
  }

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
      if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
          (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still/not held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
                                          UnderlyingMutex.Cap.toString(), loc(),
                                          JoinLoc, LEK);
      }
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      if (UnderlyingMutex.Kind == UCK_Acquired)
        lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
             &Handler);
      else
        unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
      // on double unlocking/locking if we're not destroying the scoped object.
      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
      if (UnderlyingMutex.Kind == UCK_Acquired) {
        unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
      } else {
        LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
                            ? LK_Shared
                            : LK_Exclusive;
        lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
      }
    }
    if (FullyRemove)
      FSet.removeLock(FactMan, Cp);
  }

private:
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
            LockKind kind, SourceLocation loc,
            ThreadSafetyHandler *Handler) const {
    if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) {
      if (Handler)
        Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact->loc(),
                                  loc);
    } else {
      FSet.removeLock(FactMan, !Cp);
      FSet.addLock(FactMan,
                   std::make_unique<LockableFactEntry>(Cp, kind, loc, Managed));
    }
  }

  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
              SourceLocation loc, ThreadSafetyHandler *Handler) const {
    if (FSet.findLock(FactMan, Cp)) {
      FSet.removeLock(FactMan, Cp);
      FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
                                !Cp, LK_Exclusive, loc));
    } else if (Handler) {
      SourceLocation PrevLoc;
      if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
        PrevLoc = Neg->loc();
      Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
    }
  }
};

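// Illustration (hypothetical user code, using the SCOPED_CAPABILITY macro
// from the documentation): a ScopedLockableFactEntry tracks RAII objects
// whose constructor acquires and whose destructor releases a capability:
//
// \code
//   void f() {
//     MutexLocker Scope(&Mu); // scoped fact added; underlying 'Mu' acquired
//     ...                     // 'Mu' is held here
//   }                         // destructor: handleUnlock releases 'Mu'
// \endcode
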
/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  threadSafety::SExprBuilder SxBuilder;

  ThreadSafetyHandler &Handler;
  const FunctionDecl *CurrentFunction;
  LocalVariableMap LocalVarMap;
  // Maps constructed objects to `this` placeholder prior to initialization.
  llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
  FactManager FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}

  bool inCurrentScope(const CapabilityExpr &CapE);

  void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
               bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);

  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, til::SExpr *Self = nullptr);

  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr *getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock *PredBlock,
                      const CFGBlock *CurrBlock);

  bool join(const FactEntry &a, const FactEntry &b, bool CanModify);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind EntryLEK,
                        LockErrorKind ExitLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind LEK) {
    intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
  }

  void runAnalysis(AnalysisDeclContext &AC);

  void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
                          const Expr *Exp, AccessKind AK, Expr *MutexExp,
                          ProtectedOperationKind POK, til::LiteralPtr *Self,
                          SourceLocation Loc);
  void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D,
                       const Expr *Exp, Expr *MutexExp, til::LiteralPtr *Self,
                       SourceLocation Loc);

  void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK);
  void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK);
};

} // namespace

/// Process acquired_before and acquired_after attributes on Vd.
BeforeSet::BeforeInfo *
BeforeSet::insertAttrExprs(const ValueDecl *Vd,
                           ThreadSafetyAnalyzer &Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
    case attr::AcquiredBefore: {
      const auto *A = cast<AcquiredBeforeAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *Cpvd = Cp.valueDecl()) {
          Info->Vect.push_back(Cpvd);
          const auto It = BMap.find(Cpvd);
          if (It == BMap.end())
            insertAttrExprs(Cpvd, Analyzer);
        }
      }
      break;
    }
    case attr::AcquiredAfter: {
      const auto *A = cast<AcquiredAfterAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *ArgVd = Cp.valueDecl()) {
          // Get entry for mutex listed in attribute
          BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
          ArgInfo->Vect.push_back(Vd);
        }
      }
      break;
    }
    default:
      break;
    }
  }

  return Info;
}

BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
                                ThreadSafetyAnalyzer &Analyzer) {
  auto It = BMap.find(Vd);
  BeforeInfo *Info = nullptr;
  if (It == BMap.end())
    Info = insertAttrExprs(Vd, Analyzer);
  else
    Info = It->second.get();
  assert(Info && "BMap contained nullptr?");
  return Info;
}

/// Warn if any mutexes in FSet are in the acquired_before set of StartVd,
/// and warn about any cycles in the before/after graph.
void BeforeSet::checkBeforeAfter(const ValueDecl *StartVd,
                                 const FactSet &FSet,
                                 ThreadSafetyAnalyzer &Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  SmallVector<BeforeInfo *, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  std::function<bool(const ValueDecl *)> traverse = [&](const ValueDecl *Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    if (Info->Visited == 1)
      return true;

    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        if (!CycMap.contains(Vd)) {
          CycMap.insert(std::make_pair(Vd, true));
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  for (auto *Info : InfoVect)
    Info->Visited = 0;
}

/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return nullptr;
}

namespace {

template <typename Ty>
class has_arg_iterator_range {
  using yes = char[1];
  using no = char[2];

  template <typename Inner>
  static yes &test(Inner *I, decltype(I->args()) * = nullptr);

  template <typename>
  static no &test(...);

public:
  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};

} // namespace

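// The trait above is a classic two-overload SFINAE test: the first test()
// overload is viable only when 'I->args()' is well-formed, and the sizeof
// comparison detects which overload was selected. A sketch of typical use
// (illustrative only; 'process' is not a function defined in this file):
//
// \code
//   template <typename AttrType>
//   void process(const AttrType *A) {
//     if constexpr (has_arg_iterator_range<AttrType>::value) {
//       for (const auto *Arg : A->args()) { /* ... */ }
//     }
//   }
// \endcode
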
bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  const threadSafety::til::SExpr *SExp = CapE.sexpr();
  assert(SExp && "Null expressions should be ignored");

  if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
    const ValueDecl *VD = LP->clangDecl();
    // Variables defined in a function are always inaccessible.
    if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
      return false;
    // For now we consider static class members to be inaccessible.
    if (isa<CXXRecordDecl>(VD->getDeclContext()))
      return false;
    // Global variables are always in scope.
    return true;
  }

  // Members are in scope from methods of the same class.
  if (const auto *P = dyn_cast<til::Project>(SExp)) {
    if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction))
      return false;
    const ValueDecl *VD = P->clangDecl();
    return VD->getDeclContext() == CurrentFunction->getDeclContext();
  }

  return false;
}

/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
                                   std::unique_ptr<FactEntry> Entry,
                                   bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // Look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    } else {
      if (inCurrentScope(*Entry) && !Entry->asserted())
        Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints
  if (Handler.issueBetaWarnings() &&
      !Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), Entry->getKind());
  }

  // FIXME: Don't always warn when we have support for reentrant locks.
  if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler);
  } else {
    FSet.addLock(FactMan, std::move(Entry));
  }
}

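// Illustration of the negative-capability check above (hypothetical user
// code, diagnosed under -Wthread-safety-negative):
//
// \code
//   Mutex Mu;
//   void f() REQUIRES(!Mu) {
//     Mu.Lock();   // OK: the negative capability '!Mu' is held on entry.
//     Mu.Unlock();
//   }
//   void g() {
//     Mu.Lock();   // warning: acquiring mutex 'Mu' requires negative
//     Mu.Unlock(); // capability '!Mu'
//   }
// \endcode
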
/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    SourceLocation PrevLoc;
    if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
      PrevLoc = Neg->loc();
    Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
                                  PrevLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
                                      ReceivedKind, LDat->loc(), UnlockLoc);
  }

  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
}

/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       til::SExpr *Self) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      return;
    }
    // else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
    return;
  }

  for (const auto *Arg : Attr->args()) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      continue;
    }
    // else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  }
}

1354 | | /// Extract the list of mutexIDs from a trylock attribute. If the |
1355 | | /// trylock applies to the given edge, then push them onto Mtxs, discarding |
1356 | | /// any duplicates. |
1357 | | template <class AttrType> |
1358 | | void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, |
1359 | | const Expr *Exp, const NamedDecl *D, |
1360 | | const CFGBlock *PredBlock, |
1361 | | const CFGBlock *CurrBlock, |
1362 | 0 | Expr *BrE, bool Neg) { |
1363 | | // Find out which branch has the lock |
1364 | 0 | bool branch = false; |
1365 | 0 | if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) |
1366 | 0 | branch = BLE->getValue(); |
1367 | 0 | else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) |
1368 | 0 | branch = ILE->getValue().getBoolValue(); |
1369 | 0 |
1370 | 0 | int branchnum = branch ? 0 : 1; |
1371 | 0 | if (Neg) |
1372 | 0 | branchnum = !branchnum; |
1373 | | |
1374 | | // If we've taken the trylock branch, then add the lock |
1375 | 0 | int i = 0; |
1376 | 0 | for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(), |
1377 | 0 | SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) { |
1378 | 0 | if (*SI == CurrBlock && i == branchnum) |
1379 | 0 | getMutexIDs(Mtxs, Attr, Exp, D); |
1380 | 0 | } |
1381 | 0 | }
1382 | | |
1383 | 0 | static bool getStaticBooleanValue(Expr *E, bool &TCond) { |
1384 | 0 | if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) { |
1385 | 0 | TCond = false; |
1386 | 0 | return true; |
1387 | 0 | } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) { |
1388 | 0 | TCond = BLE->getValue(); |
1389 | 0 | return true; |
1390 | 0 | } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) { |
1391 | 0 | TCond = ILE->getValue().getBoolValue(); |
1392 | 0 | return true; |
1393 | 0 | } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E)) |
1394 | 0 | return getStaticBooleanValue(CE->getSubExpr(), TCond); |
1395 | 0 | return false; |
1396 | 0 | } |
1397 | | |
1398 | | // If Cond can be traced back to a function call, return the call expression. |
1399 | | // Negate should be initialized to false; it will be set to true if the
1400 | | // function call is negated, e.g. if (!mu.tryLock(...)).
1401 | | const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond, |
1402 | | LocalVarContext C, |
1403 | 0 | bool &Negate) { |
1404 | 0 | if (!Cond) |
1405 | 0 | return nullptr; |
1406 | | |
1407 | 0 | if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) { |
1408 | 0 | if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect) |
1409 | 0 | return getTrylockCallExpr(CallExp->getArg(0), C, Negate); |
1410 | 0 | return CallExp; |
1411 | 0 | } |
1412 | 0 | else if (const auto *PE = dyn_cast<ParenExpr>(Cond)) |
1413 | 0 | return getTrylockCallExpr(PE->getSubExpr(), C, Negate); |
1414 | 0 | else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond)) |
1415 | 0 | return getTrylockCallExpr(CE->getSubExpr(), C, Negate); |
1416 | 0 | else if (const auto *FE = dyn_cast<FullExpr>(Cond)) |
1417 | 0 | return getTrylockCallExpr(FE->getSubExpr(), C, Negate); |
1418 | 0 | else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) { |
1419 | 0 | const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C); |
1420 | 0 | return getTrylockCallExpr(E, C, Negate); |
1421 | 0 | } |
1422 | 0 | else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) { |
1423 | 0 | if (UOP->getOpcode() == UO_LNot) { |
1424 | 0 | Negate = !Negate; |
1425 | 0 | return getTrylockCallExpr(UOP->getSubExpr(), C, Negate); |
1426 | 0 | } |
1427 | 0 | return nullptr; |
1428 | 0 | } |
1429 | 0 | else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) { |
1430 | 0 | if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) { |
1431 | 0 | if (BOP->getOpcode() == BO_NE) |
1432 | 0 | Negate = !Negate; |
1433 | 0 |
1434 | 0 | bool TCond = false; |
1435 | 0 | if (getStaticBooleanValue(BOP->getRHS(), TCond)) { |
1436 | 0 | if (!TCond) Negate = !Negate; |
1437 | 0 | return getTrylockCallExpr(BOP->getLHS(), C, Negate); |
1438 | 0 | } |
1439 | 0 | TCond = false; |
1440 | 0 | if (getStaticBooleanValue(BOP->getLHS(), TCond)) { |
1441 | 0 | if (!TCond) Negate = !Negate; |
1442 | 0 | return getTrylockCallExpr(BOP->getRHS(), C, Negate); |
1443 | 0 | } |
1444 | 0 | return nullptr; |
1445 | 0 | } |
1446 | 0 | if (BOP->getOpcode() == BO_LAnd) { |
1447 | | // LHS must have been evaluated in a different block. |
1448 | 0 | return getTrylockCallExpr(BOP->getRHS(), C, Negate); |
1449 | 0 | } |
1450 | 0 | if (BOP->getOpcode() == BO_LOr) |
1451 | 0 | return getTrylockCallExpr(BOP->getRHS(), C, Negate); |
1452 | 0 | return nullptr; |
1453 | 0 | } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) { |
1454 | 0 | bool TCond, FCond; |
1455 | 0 | if (getStaticBooleanValue(COP->getTrueExpr(), TCond) && |
1456 | 0 | getStaticBooleanValue(COP->getFalseExpr(), FCond)) { |
1457 | 0 | if (TCond && !FCond) |
1458 | 0 | return getTrylockCallExpr(COP->getCond(), C, Negate); |
1459 | 0 | if (!TCond && FCond) { |
1460 | 0 | Negate = !Negate; |
1461 | 0 | return getTrylockCallExpr(COP->getCond(), C, Negate); |
1462 | 0 | } |
1463 | 0 | } |
1464 | 0 | } |
1465 | 0 | return nullptr; |
1466 | 0 | } |
1467 | | |
1468 | | /// Find the lockset that holds on the edge between PredBlock |
1469 | | /// and CurrBlock. The edge set is the exit set of PredBlock (passed |
1470 | | /// as the ExitSet parameter) plus any trylocks, which are conditionally held. |
1471 | | void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result, |
1472 | | const FactSet &ExitSet, |
1473 | | const CFGBlock *PredBlock, |
1474 | 0 | const CFGBlock *CurrBlock) { |
1475 | 0 | Result = ExitSet; |
1476 | 0 |
1477 | 0 | const Stmt *Cond = PredBlock->getTerminatorCondition(); |
1478 | | // We don't acquire try-locks on ?: branches, only when their result is used.
1479 | 0 | if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt())) |
1480 | 0 | return; |
1481 | | |
1482 | 0 | bool Negate = false; |
1483 | 0 | const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()]; |
1484 | 0 | const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext; |
1485 | 0 |
1486 | 0 | const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate); |
1487 | 0 | if (!Exp) |
1488 | 0 | return; |
1489 | | |
1490 | 0 | auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); |
1491 | 0 | if (!FunDecl || !FunDecl->hasAttrs())
1492 | 0 | return; |
1493 | | |
1494 | 0 | CapExprSet ExclusiveLocksToAdd; |
1495 | 0 | CapExprSet SharedLocksToAdd; |
1496 | | |
1497 | | // If the condition is a call to a Trylock function, then grab the attributes |
1498 | 0 | for (const auto *Attr : FunDecl->attrs()) { |
1499 | 0 | switch (Attr->getKind()) { |
1500 | 0 | case attr::TryAcquireCapability: { |
1501 | 0 | auto *A = cast<TryAcquireCapabilityAttr>(Attr); |
1502 | 0 | getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, |
1503 | 0 | Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(), |
1504 | 0 | Negate); |
1505 | 0 | break; |
1506 | 0 | }
1507 | 0 | case attr::ExclusiveTrylockFunction: { |
1508 | 0 | const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr); |
1509 | 0 | getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock, |
1510 | 0 | A->getSuccessValue(), Negate); |
1511 | 0 | break; |
1512 | 0 | } |
1513 | 0 | case attr::SharedTrylockFunction: { |
1514 | 0 | const auto *A = cast<SharedTrylockFunctionAttr>(Attr); |
1515 | 0 | getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock, |
1516 | 0 | A->getSuccessValue(), Negate); |
1517 | 0 | break; |
1518 | 0 | } |
1519 | 0 | default: |
1520 | 0 | break; |
1521 | 0 | } |
1522 | 0 | } |
1523 | | |
1524 | | // Add and remove locks. |
1525 | 0 | SourceLocation Loc = Exp->getExprLoc(); |
1526 | 0 | for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd) |
1527 | 0 | addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd, |
1528 | 0 | LK_Exclusive, Loc)); |
1529 | 0 | for (const auto &SharedLockToAdd : SharedLocksToAdd) |
1530 | 0 | addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd, |
1531 | 0 | LK_Shared, Loc)); |
1532 | 0 | } |
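
In user-visible terms (same illustrative Mutex wrapper as above, not from this file): the exit set of the block that ends in the trylock condition does not contain the mutex, and only the edge into the success branch gains it.

struct __attribute__((capability("mutex"))) Mutex {
  void Unlock() __attribute__((release_capability()));
  bool TryLock() __attribute__((try_acquire_capability(true)));
};

Mutex mu;
int counter __attribute__((guarded_by(mu)));

void tryIncrement() {
  if (mu.TryLock()) { // edge into this branch carries {mu, exclusive}
    ++counter;        // OK: mu is in the entry lockset of this block
    mu.Unlock();
  } else {
    // edge into this branch carries no locks; touching counter here warns
  }
}
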
1533 | | |
1534 | | namespace { |
1535 | | |
1536 | | /// We use this class to visit different types of expressions in |
1537 | | /// CFGBlocks, and build up the lockset. |
1538 | | /// An expression may cause us to add or remove locks from the lockset, or else |
1539 | | /// output error messages related to missing locks. |
1540 | | /// FIXME: In future, we may be able to not inherit from a visitor. |
1541 | | class BuildLockset : public ConstStmtVisitor<BuildLockset> { |
1542 | | friend class ThreadSafetyAnalyzer; |
1543 | | |
1544 | | ThreadSafetyAnalyzer *Analyzer; |
1545 | | FactSet FSet; |
1546 | | // The fact set for the function on exit. |
1547 | | const FactSet &FunctionExitFSet; |
1548 | | LocalVariableMap::Context LVarCtx; |
1549 | | unsigned CtxIndex; |
1550 | | |
1551 | | // helper functions |
1552 | | |
1553 | | void checkAccess(const Expr *Exp, AccessKind AK, |
1554 | 0 | ProtectedOperationKind POK = POK_VarAccess) { |
1555 | 0 | Analyzer->checkAccess(FSet, Exp, AK, POK); |
1556 | 0 | } |
1557 | | void checkPtAccess(const Expr *Exp, AccessKind AK, |
1558 | 0 | ProtectedOperationKind POK = POK_VarAccess) { |
1559 | 0 | Analyzer->checkPtAccess(FSet, Exp, AK, POK); |
1560 | 0 | } |
1561 | | |
1562 | | void handleCall(const Expr *Exp, const NamedDecl *D, |
1563 | | til::LiteralPtr *Self = nullptr, |
1564 | | SourceLocation Loc = SourceLocation()); |
1565 | | void examineArguments(const FunctionDecl *FD, |
1566 | | CallExpr::const_arg_iterator ArgBegin, |
1567 | | CallExpr::const_arg_iterator ArgEnd, |
1568 | | bool SkipFirstParam = false); |
1569 | | |
1570 | | public: |
1571 | | BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info, |
1572 | | const FactSet &FunctionExitFSet) |
1573 | | : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet), |
1574 | | FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext), |
1575 | 0 | CtxIndex(Info.EntryIndex) {} |
1576 | | |
1577 | | void VisitUnaryOperator(const UnaryOperator *UO); |
1578 | | void VisitBinaryOperator(const BinaryOperator *BO); |
1579 | | void VisitCastExpr(const CastExpr *CE); |
1580 | | void VisitCallExpr(const CallExpr *Exp); |
1581 | | void VisitCXXConstructExpr(const CXXConstructExpr *Exp); |
1582 | | void VisitDeclStmt(const DeclStmt *S); |
1583 | | void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp); |
1584 | | void VisitReturnStmt(const ReturnStmt *S); |
1585 | | }; |
1586 | | |
1587 | | } // namespace |
1588 | | |
1589 | | /// Warn if the LSet does not contain a lock sufficient to protect access |
1590 | | /// of at least the passed in AccessKind. |
1591 | | void ThreadSafetyAnalyzer::warnIfMutexNotHeld( |
1592 | | const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK, |
1593 | | Expr *MutexExp, ProtectedOperationKind POK, til::LiteralPtr *Self, |
1594 | 0 | SourceLocation Loc) { |
1595 | 0 | LockKind LK = getLockKindFromAccessKind(AK); |
1596 | 0 | CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self); |
1597 | 0 | if (Cp.isInvalid()) { |
1598 | 0 | warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind()); |
1599 | 0 | return; |
1600 | 0 | } else if (Cp.shouldIgnore()) { |
1601 | 0 | return; |
1602 | 0 | } |
1603 | | |
1604 | 0 | if (Cp.negative()) { |
1605 | | // Negative capabilities act like locks excluded |
1606 | 0 | const FactEntry *LDat = FSet.findLock(FactMan, !Cp); |
1607 | 0 | if (LDat) { |
1608 | 0 | Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(), |
1609 | 0 | (!Cp).toString(), Loc); |
1610 | 0 | return; |
1611 | 0 | } |
1612 | | |
1613 | | // If this does not refer to a negative capability in the same class, |
1614 | | // then stop here. |
1615 | 0 | if (!inCurrentScope(Cp)) |
1616 | 0 | return; |
1617 | | |
1618 | | // Otherwise the negative requirement must be propagated to the caller. |
1619 | 0 | LDat = FSet.findLock(FactMan, Cp); |
1620 | 0 | if (!LDat) { |
1621 | 0 | Handler.handleNegativeNotHeld(D, Cp.toString(), Loc); |
1622 | 0 | } |
1623 | 0 | return; |
1624 | 0 | } |
1625 | | |
1626 | 0 | const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp); |
1627 | 0 | bool NoError = true; |
1628 | 0 | if (!LDat) { |
1629 | | // No exact match found. Look for a partial match. |
1630 | 0 | LDat = FSet.findPartialMatch(FactMan, Cp); |
1631 | 0 | if (LDat) { |
1632 | | // Warn that there's no precise match. |
1633 | 0 | std::string PartMatchStr = LDat->toString(); |
1634 | 0 | StringRef PartMatchName(PartMatchStr); |
1635 | 0 | Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc, |
1636 | 0 | &PartMatchName); |
1637 | 0 | } else { |
1638 | | // Warn that there's no match at all. |
1639 | 0 | Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc); |
1640 | 0 | } |
1641 | 0 | NoError = false; |
1642 | 0 | } |
1643 | | // Make sure the mutex we found is the right kind. |
1644 | 0 | if (NoError && LDat && !LDat->isAtLeast(LK)) { |
1645 | 0 | Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc); |
1646 | 0 | } |
1647 | 0 | } |
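
A sketch of the negative-capability path above, assuming the usual attribute spellings from the Clang thread-safety documentation (the handleNegativeNotHeld diagnostic is emitted under -Wthread-safety-negative; the others under plain -Wthread-safety):

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

Mutex mu;

// Callers must not hold mu; tracked as the negative capability '!mu'.
void lockAndWork() __attribute__((requires_capability(!mu))) {
  mu.Lock();
  mu.Unlock();
}

void caller() __attribute__((requires_capability(!mu))) {
  lockAndWork(); // OK: !mu is held here and propagated to the callee
  mu.Lock();
  lockAndWork(); // warns: mu is held, which excludes the call
  mu.Unlock();
}
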
1648 | | |
1649 | | /// Warn if the LSet contains the given lock. |
1650 | | void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet, |
1651 | | const NamedDecl *D, const Expr *Exp, |
1652 | | Expr *MutexExp, |
1653 | | til::LiteralPtr *Self, |
1654 | 0 | SourceLocation Loc) { |
1655 | 0 | CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self); |
1656 | 0 | if (Cp.isInvalid()) { |
1657 | 0 | warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind()); |
1658 | 0 | return; |
1659 | 0 | } else if (Cp.shouldIgnore()) { |
1660 | 0 | return; |
1661 | 0 | } |
1662 | | |
1663 | 0 | const FactEntry *LDat = FSet.findLock(FactMan, Cp); |
1664 | 0 | if (LDat) { |
1665 | 0 | Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(), |
1666 | 0 | Cp.toString(), Loc); |
1667 | 0 | } |
1668 | 0 | } |
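
The check above is what backs locks_excluded; a plausible example (illustrative names, not from this file):

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

Mutex mu;

void helper() __attribute__((locks_excluded(mu))) {
  mu.Lock(); // acquires mu itself, so callers must not already hold it
  mu.Unlock();
}

void bad() {
  mu.Lock();
  helper(); // warns: mu is held, but helper() excludes it
  mu.Unlock();
}
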
1669 | | |
1670 | | /// Checks guarded_by and pt_guarded_by attributes. |
1671 | | /// Whenever we identify an access (read or write) to a DeclRefExpr that is |
1672 | | /// marked with guarded_by, we must ensure the appropriate mutexes are held. |
1673 | | /// Similarly, we check if the access is to an expression that dereferences |
1674 | | /// a pointer marked with pt_guarded_by. |
1675 | | void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp, |
1676 | | AccessKind AK, |
1677 | 0 | ProtectedOperationKind POK) { |
1678 | 0 | Exp = Exp->IgnoreImplicit()->IgnoreParenCasts(); |
1679 | 0 |
1680 | 0 | SourceLocation Loc = Exp->getExprLoc(); |
1681 | | |
1682 | | // Local variables of reference type cannot be re-assigned; |
1683 | | // map them to their initializer. |
1684 | 0 | while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) { |
1685 | 0 | const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl()); |
1686 | 0 | if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) { |
1687 | 0 | if (const auto *E = VD->getInit()) { |
1688 | | // Guard against self-initialization. e.g., int &i = i; |
1689 | 0 | if (E == Exp) |
1690 | 0 | break; |
1691 | 0 | Exp = E; |
1692 | 0 | continue; |
1693 | 0 | } |
1694 | 0 | } |
1695 | 0 | break; |
1696 | 0 | } |
1697 | 0 |
1698 | 0 | if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) { |
1699 | | // For dereferences |
1700 | 0 | if (UO->getOpcode() == UO_Deref) |
1701 | 0 | checkPtAccess(FSet, UO->getSubExpr(), AK, POK); |
1702 | 0 | return; |
1703 | 0 | } |
1704 | | |
1705 | 0 | if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) { |
1706 | 0 | switch (BO->getOpcode()) { |
1707 | 0 | case BO_PtrMemD: // .* |
1708 | 0 | return checkAccess(FSet, BO->getLHS(), AK, POK); |
1709 | 0 | case BO_PtrMemI: // ->* |
1710 | 0 | return checkPtAccess(FSet, BO->getLHS(), AK, POK); |
1711 | 0 | default: |
1712 | 0 | return; |
1713 | 0 | } |
1714 | 0 | } |
1715 | | |
1716 | 0 | if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) { |
1717 | 0 | checkPtAccess(FSet, AE->getLHS(), AK, POK); |
1718 | 0 | return; |
1719 | 0 | } |
1720 | | |
1721 | 0 | if (const auto *ME = dyn_cast<MemberExpr>(Exp)) { |
1722 | 0 | if (ME->isArrow()) |
1723 | 0 | checkPtAccess(FSet, ME->getBase(), AK, POK); |
1724 | 0 | else |
1725 | 0 | checkAccess(FSet, ME->getBase(), AK, POK); |
1726 | 0 | } |
1727 | 0 |
1728 | 0 | const ValueDecl *D = getValueDecl(Exp); |
1729 | 0 | if (!D || !D->hasAttrs()) |
1730 | 0 | return; |
1731 | | |
1732 | 0 | if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) { |
1733 | 0 | Handler.handleNoMutexHeld(D, POK, AK, Loc); |
1734 | 0 | } |
1735 | 0 |
1736 | 0 | for (const auto *I : D->specific_attrs<GuardedByAttr>()) |
1737 | 0 | warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc); |
1738 | 0 | } |
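
A minimal guarded_by sketch exercising the cases above, including the reference-local rewrite (all names illustrative, not from this file):

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

struct Counter {
  Mutex mu;
  int value __attribute__((guarded_by(mu)));

  void ok() {
    mu.Lock();
    int &ref = value; // local reference: mapped back to its initializer
    ref = 1;          // checked as a write to 'value'; OK, mu is held
    mu.Unlock();
  }

  void bad() {
    value = 2; // warns: writing 'value' requires holding 'mu' exclusively
  }
};
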
1739 | | |
1740 | | /// Checks pt_guarded_by and pt_guarded_var attributes. |
1741 | | /// POK is the same operationKind that was passed to checkAccess. |
1742 | | void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp, |
1743 | | AccessKind AK, |
1744 | 0 | ProtectedOperationKind POK) { |
1745 | 0 | while (true) { |
1746 | 0 | if (const auto *PE = dyn_cast<ParenExpr>(Exp)) { |
1747 | 0 | Exp = PE->getSubExpr(); |
1748 | 0 | continue; |
1749 | 0 | } |
1750 | 0 | if (const auto *CE = dyn_cast<CastExpr>(Exp)) { |
1751 | 0 | if (CE->getCastKind() == CK_ArrayToPointerDecay) { |
1752 | | // If it's an actual array, and not a pointer, then its elements
1753 | | // are protected by GUARDED_BY, not PT_GUARDED_BY.
1754 | 0 | checkAccess(FSet, CE->getSubExpr(), AK, POK); |
1755 | 0 | return; |
1756 | 0 | } |
1757 | 0 | Exp = CE->getSubExpr(); |
1758 | 0 | continue; |
1759 | 0 | } |
1760 | 0 | break; |
1761 | 0 | } |
1762 | | |
1763 | | // Pass by reference warnings are under a different flag. |
1764 | 0 | ProtectedOperationKind PtPOK = POK_VarDereference; |
1765 | 0 | if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef; |
1766 | 0 | if (POK == POK_ReturnByRef) |
1767 | 0 | PtPOK = POK_PtReturnByRef; |
1768 | 0 |
1769 | 0 | const ValueDecl *D = getValueDecl(Exp); |
1770 | 0 | if (!D || !D->hasAttrs()) |
1771 | 0 | return; |
1772 | | |
1773 | 0 | if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan)) |
1774 | 0 | Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc()); |
1775 | 0 |
1776 | 0 | for (auto const *I : D->specific_attrs<PtGuardedByAttr>()) |
1777 | 0 | warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr, |
1778 | 0 | Exp->getExprLoc()); |
1779 | 0 | } |
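
A pt_guarded_by sketch; note how an actual array, unlike a pointer, decays and is routed back to the guarded_by check (illustrative names, not from this file):

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

Mutex mu;
int *slot __attribute__((pt_guarded_by(mu))); // the pointee is guarded
int table[4] __attribute__((guarded_by(mu))); // elements guarded via decay

void touch() {
  *slot = 1;      // warns: dereferencing 'slot' requires holding 'mu'
  table[0] = 2;   // warns: ArrayToPointerDecay routes this to guarded_by
  slot = nullptr; // no warning: the pointer itself carries no guarded_by
}
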
1780 | | |
1781 | | /// Process a function call, method call, constructor call, |
1782 | | /// or destructor call. This involves looking at the attributes on the |
1783 | | /// corresponding function/method/constructor/destructor, issuing warnings, |
1784 | | /// and updating the locksets accordingly. |
1785 | | /// |
1786 | | /// FIXME: For classes annotated with one of the guarded annotations, we need |
1787 | | /// to treat const method calls as reads and non-const method calls as writes, |
1788 | | /// and check that the appropriate locks are held. Non-const method calls with |
1789 | | /// the same signature as const method calls can be also treated as reads. |
1790 | | /// |
1791 | | /// \param Exp The call expression. |
1792 | | /// \param D The callee declaration. |
1793 | | /// \param Self If \p Exp = nullptr, the implicit this argument or the argument |
1794 | | /// of an implicitly called cleanup function. |
1795 | | /// \param Loc If \p Exp = nullptr, the location. |
1796 | | void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D, |
1797 | 0 | til::LiteralPtr *Self, SourceLocation Loc) { |
1798 | 0 | CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd; |
1799 | 0 | CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove; |
1800 | 0 | CapExprSet ScopedReqsAndExcludes; |
1801 | | |
1802 | | // Figure out if we're constructing an object of a scoped lockable class
1803 | 0 | CapabilityExpr Scp; |
1804 | 0 | if (Exp) { |
1805 | 0 | assert(!Self); |
1806 | 0 | const auto *TagT = Exp->getType()->getAs<TagType>(); |
1807 | 0 | if (TagT && Exp->isPRValue()) { |
1808 | 0 | std::pair<til::LiteralPtr *, StringRef> Placeholder = |
1809 | 0 | Analyzer->SxBuilder.createThisPlaceholder(Exp); |
1810 | 0 | [[maybe_unused]] auto inserted = |
1811 | 0 | Analyzer->ConstructedObjects.insert({Exp, Placeholder.first}); |
1812 | 0 | assert(inserted.second && "Are we visiting the same expression again?"); |
1813 | 0 | if (isa<CXXConstructExpr>(Exp)) |
1814 | 0 | Self = Placeholder.first; |
1815 | 0 | if (TagT->getDecl()->hasAttr<ScopedLockableAttr>()) |
1816 | 0 | Scp = CapabilityExpr(Placeholder.first, Placeholder.second, false); |
1817 | 0 | } |
1818 | | |
1819 | 0 | assert(Loc.isInvalid()); |
1820 | 0 | Loc = Exp->getExprLoc(); |
1821 | 0 | } |
1822 | | |
1823 | 0 | for(const Attr *At : D->attrs()) { |
1824 | 0 | switch (At->getKind()) { |
1825 | | // When we encounter a lock function, we need to add the lock to our |
1826 | | // lockset. |
1827 | 0 | case attr::AcquireCapability: { |
1828 | 0 | const auto *A = cast<AcquireCapabilityAttr>(At); |
1829 | 0 | Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd |
1830 | 0 | : ExclusiveLocksToAdd, |
1831 | 0 | A, Exp, D, Self); |
1832 | 0 | break; |
1833 | 0 | } |
1834 | | |
1835 | | // An assert will add a lock to the lockset, but will not generate |
1836 | | // a warning if it is already there, and will not generate a warning |
1837 | | // if it is not removed. |
1838 | 0 | case attr::AssertExclusiveLock: { |
1839 | 0 | const auto *A = cast<AssertExclusiveLockAttr>(At); |
1840 | 0 |
1841 | 0 | CapExprSet AssertLocks; |
1842 | 0 | Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self); |
1843 | 0 | for (const auto &AssertLock : AssertLocks) |
1844 | 0 | Analyzer->addLock( |
1845 | 0 | FSet, std::make_unique<LockableFactEntry>( |
1846 | 0 | AssertLock, LK_Exclusive, Loc, FactEntry::Asserted)); |
1847 | 0 | break; |
1848 | 0 | } |
1849 | 0 | case attr::AssertSharedLock: { |
1850 | 0 | const auto *A = cast<AssertSharedLockAttr>(At); |
1851 | 0 |
1852 | 0 | CapExprSet AssertLocks; |
1853 | 0 | Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self); |
1854 | 0 | for (const auto &AssertLock : AssertLocks) |
1855 | 0 | Analyzer->addLock( |
1856 | 0 | FSet, std::make_unique<LockableFactEntry>( |
1857 | 0 | AssertLock, LK_Shared, Loc, FactEntry::Asserted)); |
1858 | 0 | break; |
1859 | 0 | } |
1860 | | |
1861 | 0 | case attr::AssertCapability: { |
1862 | 0 | const auto *A = cast<AssertCapabilityAttr>(At); |
1863 | 0 | CapExprSet AssertLocks; |
1864 | 0 | Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self); |
1865 | 0 | for (const auto &AssertLock : AssertLocks) |
1866 | 0 | Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>( |
1867 | 0 | AssertLock, |
1868 | 0 | A->isShared() ? LK_Shared : LK_Exclusive, |
1869 | 0 | Loc, FactEntry::Asserted)); |
1870 | 0 | break; |
1871 | 0 | } |
1872 | | |
1873 | | // When we encounter an unlock function, we need to remove unlocked |
1874 | | // mutexes from the lockset, and flag a warning if they are not there. |
1875 | 0 | case attr::ReleaseCapability: { |
1876 | 0 | const auto *A = cast<ReleaseCapabilityAttr>(At); |
1877 | 0 | if (A->isGeneric()) |
1878 | 0 | Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self); |
1879 | 0 | else if (A->isShared()) |
1880 | 0 | Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self); |
1881 | 0 | else |
1882 | 0 | Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self); |
1883 | 0 | break; |
1884 | 0 | } |
1885 | | |
1886 | 0 | case attr::RequiresCapability: { |
1887 | 0 | const auto *A = cast<RequiresCapabilityAttr>(At); |
1888 | 0 | for (auto *Arg : A->args()) { |
1889 | 0 | Analyzer->warnIfMutexNotHeld(FSet, D, Exp, |
1890 | 0 | A->isShared() ? AK_Read : AK_Written, |
1891 | 0 | Arg, POK_FunctionCall, Self, Loc); |
1892 | | // used for adopting a lock
1893 | 0 | if (!Scp.shouldIgnore()) |
1894 | 0 | Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self); |
1895 | 0 | } |
1896 | 0 | break; |
1897 | 0 | } |
1898 | | |
1899 | 0 | case attr::LocksExcluded: { |
1900 | 0 | const auto *A = cast<LocksExcludedAttr>(At); |
1901 | 0 | for (auto *Arg : A->args()) { |
1902 | 0 | Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc); |
1903 | | // used for deferring a lock
1904 | 0 | if (!Scp.shouldIgnore()) |
1905 | 0 | Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self); |
1906 | 0 | } |
1907 | 0 | break; |
1908 | 0 | } |
1909 | | |
1910 | | // Ignore attributes unrelated to thread-safety |
1911 | 0 | default: |
1912 | 0 | break; |
1913 | 0 | } |
1914 | 0 | } |
1915 | | |
1916 | | // Remove locks first to allow lock upgrading/downgrading. |
1917 | | // FIXME -- should only fully remove if the attribute refers to 'this'. |
1918 | 0 | bool Dtor = isa<CXXDestructorDecl>(D); |
1919 | 0 | for (const auto &M : ExclusiveLocksToRemove) |
1920 | 0 | Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive); |
1921 | 0 | for (const auto &M : SharedLocksToRemove) |
1922 | 0 | Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared); |
1923 | 0 | for (const auto &M : GenericLocksToRemove) |
1924 | 0 | Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic); |
1925 | | |
1926 | | // Add locks. |
1927 | 0 | FactEntry::SourceKind Source = |
1928 | 0 | !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired; |
1929 | 0 | for (const auto &M : ExclusiveLocksToAdd) |
1930 | 0 | Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive, |
1931 | 0 | Loc, Source)); |
1932 | 0 | for (const auto &M : SharedLocksToAdd) |
1933 | 0 | Analyzer->addLock( |
1934 | 0 | FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source)); |
1935 | 0 |
1936 | 0 | if (!Scp.shouldIgnore()) { |
1937 | | // Add the managing object as a dummy mutex, mapped to the underlying mutex. |
1938 | 0 | auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(Scp, Loc); |
1939 | 0 | for (const auto &M : ExclusiveLocksToAdd) |
1940 | 0 | ScopedEntry->addLock(M); |
1941 | 0 | for (const auto &M : SharedLocksToAdd) |
1942 | 0 | ScopedEntry->addLock(M); |
1943 | 0 | for (const auto &M : ScopedReqsAndExcludes) |
1944 | 0 | ScopedEntry->addLock(M); |
1945 | 0 | for (const auto &M : ExclusiveLocksToRemove) |
1946 | 0 | ScopedEntry->addExclusiveUnlock(M); |
1947 | 0 | for (const auto &M : SharedLocksToRemove) |
1948 | 0 | ScopedEntry->addSharedUnlock(M); |
1949 | 0 | Analyzer->addLock(FSet, std::move(ScopedEntry)); |
1950 | 0 | } |
1951 | 0 | } |
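
The scoped-lockable path above is easiest to see with a hand-rolled RAII guard, modeled on the MutexLocker example in the Clang thread-safety documentation (assumed here, not defined in this file). Constructing the guard adds the underlying mutex plus a dummy fact for the guard object itself; the destructor releases what the guard manages.

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

class __attribute__((scoped_lockable)) MutexLocker {
  Mutex *mut;

public:
  MutexLocker(Mutex *mu) __attribute__((acquire_capability(mu))) : mut(mu) {
    mu->Lock();
  }
  ~MutexLocker() __attribute__((release_capability())) { mut->Unlock(); }
};

Mutex mu;
int data __attribute__((guarded_by(mu)));

void scopedUse() {
  MutexLocker guard(&mu); // adds mu plus the managing 'guard' fact
  data = 1;               // OK: mu is held
}                         // guard's destructor releases mu; no leak warning
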
1952 | | |
1953 | | /// For unary operations which read and write a variable, we need to |
1954 | | /// check whether we hold any required mutexes. Reads are checked in |
1955 | | /// VisitCastExpr. |
1956 | 0 | void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) { |
1957 | 0 | switch (UO->getOpcode()) { |
1958 | 0 | case UO_PostDec: |
1959 | 0 | case UO_PostInc: |
1960 | 0 | case UO_PreDec: |
1961 | 0 | case UO_PreInc: |
1962 | 0 | checkAccess(UO->getSubExpr(), AK_Written); |
1963 | 0 | break; |
1964 | 0 | default: |
1965 | 0 | break; |
1966 | 0 | } |
1967 | 0 | } |
1968 | | |
1969 | | /// For binary operations which assign to a variable (writes), we need to check |
1970 | | /// whether we hold any required mutexes. |
1971 | | /// FIXME: Deal with non-primitive types. |
1972 | 0 | void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) { |
1973 | 0 | if (!BO->isAssignmentOp()) |
1974 | 0 | return; |
1975 | | |
1976 | | // adjust the context |
1977 | 0 | LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx); |
1978 | 0 |
1979 | 0 | checkAccess(BO->getLHS(), AK_Written); |
1980 | 0 | } |
1981 | | |
1982 | | /// Whenever we do an LValue to Rvalue cast, we are reading a variable and |
1983 | | /// need to ensure we hold any required mutexes. |
1984 | | /// FIXME: Deal with non-primitive types. |
1985 | 0 | void BuildLockset::VisitCastExpr(const CastExpr *CE) { |
1986 | 0 | if (CE->getCastKind() != CK_LValueToRValue) |
1987 | 0 | return; |
1988 | 0 | checkAccess(CE->getSubExpr(), AK_Read); |
1989 | 0 | } |
1990 | | |
1991 | | void BuildLockset::examineArguments(const FunctionDecl *FD, |
1992 | | CallExpr::const_arg_iterator ArgBegin, |
1993 | | CallExpr::const_arg_iterator ArgEnd, |
1994 | 0 | bool SkipFirstParam) { |
1995 | | // Currently we can't do anything if we don't know the function declaration. |
1996 | 0 | if (!FD) |
1997 | 0 | return; |
1998 | | |
1999 | | // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it |
2000 | | // only turns off checking within the body of a function, but we also |
2001 | | // use it to turn off checking in arguments to the function. This |
2002 | | // could result in some false negatives, but the alternative is to |
2003 | | // create yet another attribute. |
2004 | 0 | if (FD->hasAttr<NoThreadSafetyAnalysisAttr>()) |
2005 | 0 | return; |
2006 | | |
2007 | 0 | const ArrayRef<ParmVarDecl *> Params = FD->parameters(); |
2008 | 0 | auto Param = Params.begin(); |
2009 | 0 | if (SkipFirstParam) |
2010 | 0 | ++Param; |
2011 | | |
2012 | | // There can be default arguments, so we stop when one iterator is at end(). |
2013 | 0 | for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd; |
2014 | 0 | ++Param, ++Arg) { |
2015 | 0 | QualType Qt = (*Param)->getType(); |
2016 | 0 | if (Qt->isReferenceType()) |
2017 | 0 | checkAccess(*Arg, AK_Read, POK_PassByRef); |
2018 | 0 | } |
2019 | 0 | } |
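
A sketch of the pass-by-reference check (these warnings sit behind -Wthread-safety-reference; names illustrative, not from this file):

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

Mutex mu;
int data __attribute__((guarded_by(mu)));

void sink(const int &x) {}

void pass() {
  sink(data); // warns: passing 'data' by reference requires holding 'mu'
  mu.Lock();
  sink(data); // OK
  mu.Unlock();
}
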
2020 | | |
2021 | 0 | void BuildLockset::VisitCallExpr(const CallExpr *Exp) { |
2022 | 0 | if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) { |
2023 | 0 | const auto *ME = dyn_cast<MemberExpr>(CE->getCallee()); |
2024 | | // ME can be null when calling a method pointer |
2025 | 0 | const CXXMethodDecl *MD = CE->getMethodDecl(); |
2026 | 0 |
2027 | 0 | if (ME && MD) { |
2028 | 0 | if (ME->isArrow()) { |
2029 | | // Should perhaps be AK_Written if !MD->isConst(). |
2030 | 0 | checkPtAccess(CE->getImplicitObjectArgument(), AK_Read); |
2031 | 0 | } else { |
2032 | | // Should perhaps be AK_Written if !MD->isConst(). |
2033 | 0 | checkAccess(CE->getImplicitObjectArgument(), AK_Read); |
2034 | 0 | } |
2035 | 0 | } |
2036 | 0 |
2037 | 0 | examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end()); |
2038 | 0 | } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) { |
2039 | 0 | OverloadedOperatorKind OEop = OE->getOperator(); |
2040 | 0 | switch (OEop) { |
2041 | 0 | case OO_Equal: |
2042 | 0 | case OO_PlusEqual: |
2043 | 0 | case OO_MinusEqual: |
2044 | 0 | case OO_StarEqual: |
2045 | 0 | case OO_SlashEqual: |
2046 | 0 | case OO_PercentEqual: |
2047 | 0 | case OO_CaretEqual: |
2048 | 0 | case OO_AmpEqual: |
2049 | 0 | case OO_PipeEqual: |
2050 | 0 | case OO_LessLessEqual: |
2051 | 0 | case OO_GreaterGreaterEqual: |
2052 | 0 | checkAccess(OE->getArg(1), AK_Read); |
2053 | 0 | [[fallthrough]]; |
2054 | 0 | case OO_PlusPlus: |
2055 | 0 | case OO_MinusMinus: |
2056 | 0 | checkAccess(OE->getArg(0), AK_Written); |
2057 | 0 | break; |
2058 | 0 | case OO_Star: |
2059 | 0 | case OO_ArrowStar: |
2060 | 0 | case OO_Arrow: |
2061 | 0 | case OO_Subscript: |
2062 | 0 | if (!(OEop == OO_Star && OE->getNumArgs() > 1)) { |
2063 | | // Grrr. operator* can be multiplication... |
2064 | 0 | checkPtAccess(OE->getArg(0), AK_Read); |
2065 | 0 | } |
2066 | 0 | [[fallthrough]]; |
2067 | 0 | default: { |
2068 | | // TODO: get rid of this, and rely on pass-by-ref instead. |
2069 | 0 | const Expr *Obj = OE->getArg(0); |
2070 | 0 | checkAccess(Obj, AK_Read); |
2071 | | // Check the remaining arguments. For method operators, the first |
2072 | | // argument is the implicit self argument, and doesn't appear in the |
2073 | | // FunctionDecl, but for non-methods it does. |
2074 | 0 | const FunctionDecl *FD = OE->getDirectCallee(); |
2075 | 0 | examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(), |
2076 | 0 | /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD)); |
2077 | 0 | break; |
2078 | 0 | } |
2079 | 0 | } |
2080 | 0 | } else { |
2081 | 0 | examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end()); |
2082 | 0 | } |
2083 | | |
2084 | 0 | auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); |
2085 | 0 | if (!D || !D->hasAttrs())
2086 | 0 | return; |
2087 | 0 | handleCall(Exp, D); |
2088 | 0 | } |
2089 | | |
2090 | 0 | void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) { |
2091 | 0 | const CXXConstructorDecl *D = Exp->getConstructor(); |
2092 | 0 | if (D && D->isCopyConstructor()) { |
2093 | 0 | const Expr* Source = Exp->getArg(0); |
2094 | 0 | checkAccess(Source, AK_Read); |
2095 | 0 | } else { |
2096 | 0 | examineArguments(D, Exp->arg_begin(), Exp->arg_end()); |
2097 | 0 | } |
2098 | 0 | if (D && D->hasAttrs()) |
2099 | 0 | handleCall(Exp, D); |
2100 | 0 | } |
2101 | | |
2102 | 0 | static const Expr *UnpackConstruction(const Expr *E) { |
2103 | 0 | if (auto *CE = dyn_cast<CastExpr>(E)) |
2104 | 0 | if (CE->getCastKind() == CK_NoOp) |
2105 | 0 | E = CE->getSubExpr()->IgnoreParens(); |
2106 | 0 | if (auto *CE = dyn_cast<CastExpr>(E)) |
2107 | 0 | if (CE->getCastKind() == CK_ConstructorConversion || |
2108 | 0 | CE->getCastKind() == CK_UserDefinedConversion) |
2109 | 0 | E = CE->getSubExpr(); |
2110 | 0 | if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E)) |
2111 | 0 | E = BTE->getSubExpr(); |
2112 | 0 | return E; |
2113 | 0 | } |
2114 | | |
2115 | 0 | void BuildLockset::VisitDeclStmt(const DeclStmt *S) { |
2116 | | // adjust the context |
2117 | 0 | LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx); |
2118 | 0 |
2119 | 0 | for (auto *D : S->getDeclGroup()) { |
2120 | 0 | if (auto *VD = dyn_cast_or_null<VarDecl>(D)) { |
2121 | 0 | const Expr *E = VD->getInit(); |
2122 | 0 | if (!E) |
2123 | 0 | continue; |
2124 | 0 | E = E->IgnoreParens(); |
2125 | | |
2126 | | // handle constructors that involve temporaries |
2127 | 0 | if (auto *EWC = dyn_cast<ExprWithCleanups>(E)) |
2128 | 0 | E = EWC->getSubExpr()->IgnoreParens(); |
2129 | 0 | E = UnpackConstruction(E); |
2130 | 0 |
2131 | 0 | if (auto Object = Analyzer->ConstructedObjects.find(E); |
2132 | 0 | Object != Analyzer->ConstructedObjects.end()) { |
2133 | 0 | Object->second->setClangDecl(VD); |
2134 | 0 | Analyzer->ConstructedObjects.erase(Object); |
2135 | 0 | } |
2136 | 0 | } |
2137 | 0 | } |
2138 | 0 | } |
2139 | | |
2140 | | void BuildLockset::VisitMaterializeTemporaryExpr( |
2141 | 0 | const MaterializeTemporaryExpr *Exp) { |
2142 | 0 | if (const ValueDecl *ExtD = Exp->getExtendingDecl()) { |
2143 | 0 | if (auto Object = Analyzer->ConstructedObjects.find( |
2144 | 0 | UnpackConstruction(Exp->getSubExpr())); |
2145 | 0 | Object != Analyzer->ConstructedObjects.end()) { |
2146 | 0 | Object->second->setClangDecl(ExtD); |
2147 | 0 | Analyzer->ConstructedObjects.erase(Object); |
2148 | 0 | } |
2149 | 0 | } |
2150 | 0 | } |
2151 | | |
2152 | 0 | void BuildLockset::VisitReturnStmt(const ReturnStmt *S) { |
2153 | 0 | if (Analyzer->CurrentFunction == nullptr) |
2154 | 0 | return; |
2155 | 0 | const Expr *RetVal = S->getRetValue(); |
2156 | 0 | if (!RetVal) |
2157 | 0 | return; |
2158 | | |
2159 | | // If returning by reference, check that the function requires the appropriate |
2160 | | // capabilities. |
2161 | 0 | const QualType ReturnType = |
2162 | 0 | Analyzer->CurrentFunction->getReturnType().getCanonicalType(); |
2163 | 0 | if (ReturnType->isLValueReferenceType()) { |
2164 | 0 | Analyzer->checkAccess( |
2165 | 0 | FunctionExitFSet, RetVal, |
2166 | 0 | ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written, |
2167 | 0 | POK_ReturnByRef); |
2168 | 0 | } |
2169 | 0 | } |
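
A sketch of the return-by-reference check (also under -Wthread-safety-reference): the returned lvalue is checked against the function's expected exit lockset, so an accessor returning a guarded member must declare the requirement. Names are illustrative, not from this file.

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

struct Box {
  Mutex mu;
  int item __attribute__((guarded_by(mu)));

  int &good() __attribute__((requires_capability(mu))) {
    return item; // OK: mu is in the expected exit set
  }
  int &bad() {
    return item; // warns: returning a reference to 'item' requires 'mu'
  }
};
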
2170 | | |
2171 | | /// Given two facts merging on a join point, possibly warn and decide whether to |
2172 | | /// keep or replace. |
2173 | | /// |
2174 | | /// \param CanModify Whether we can replace \p A by \p B. |
2175 | | /// \return false if we should keep \p A, true if we should take \p B. |
2176 | | bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B, |
2177 | 0 | bool CanModify) { |
2178 | 0 | if (A.kind() != B.kind()) { |
2179 | | // For managed capabilities, the destructor should unlock in the right mode |
2180 | | // anyway. For asserted capabilities no unlocking is needed. |
2181 | 0 | if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) { |
2182 | | // The shared capability subsumes the exclusive capability, if possible. |
2183 | 0 | bool ShouldTakeB = B.kind() == LK_Shared; |
2184 | 0 | if (CanModify || !ShouldTakeB) |
2185 | 0 | return ShouldTakeB; |
2186 | 0 | } |
2187 | 0 | Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(), |
2188 | 0 | A.loc()); |
2189 | | // Take the exclusive capability to reduce further warnings. |
2190 | 0 | return CanModify && B.kind() == LK_Exclusive; |
2191 | 0 | } else { |
2192 | | // The non-asserted capability is the one we want to track. |
2193 | 0 | return CanModify && A.asserted() && !B.asserted(); |
2194 | 0 | } |
2195 | 0 | } |
2196 | | |
2197 | | /// Compute the intersection of two locksets and issue warnings for any |
2198 | | /// locks in the symmetric difference. |
2199 | | /// |
2200 | | /// This function is used at a merge point in the CFG when comparing the lockset |
2201 | | /// of each branch being merged. For example, given the following sequence: |
2202 | | /// A; if () then B; else C; D; we need to check that the lockset after B and C |
2203 | | /// are the same. In the event of a difference, we use the intersection of these |
2204 | | /// two locksets at the start of D. |
2205 | | /// |
2206 | | /// \param EntrySet A lockset for entry into a (possibly new) block. |
2207 | | /// \param ExitSet The lockset on exiting a preceding block. |
2208 | | /// \param JoinLoc The location of the join point for error reporting |
2209 | | /// \param EntryLEK The warning if a mutex is missing from \p EntrySet. |
2210 | | /// \param ExitLEK The warning if a mutex is missing from \p ExitSet. |
2211 | | void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet, |
2212 | | const FactSet &ExitSet, |
2213 | | SourceLocation JoinLoc, |
2214 | | LockErrorKind EntryLEK, |
2215 | 0 | LockErrorKind ExitLEK) { |
2216 | 0 | FactSet EntrySetOrig = EntrySet; |
2217 | | |
2218 | | // Find locks in ExitSet that conflict or are not in EntrySet, and warn. |
2219 | 0 | for (const auto &Fact : ExitSet) { |
2220 | 0 | const FactEntry &ExitFact = FactMan[Fact]; |
2221 | |
|
2222 | 0 | FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact); |
2223 | 0 | if (EntryIt != EntrySet.end()) { |
2224 | 0 | if (join(FactMan[*EntryIt], ExitFact, |
2225 | 0 | EntryLEK != LEK_LockedSomeLoopIterations)) |
2226 | 0 | *EntryIt = Fact; |
2227 | 0 | } else if (!ExitFact.managed()) { |
2228 | 0 | ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc, |
2229 | 0 | EntryLEK, Handler); |
2230 | 0 | } |
2231 | 0 | } |
2232 | | |
2233 | | // Find locks in EntrySet that are not in ExitSet, and remove them. |
2234 | 0 | for (const auto &Fact : EntrySetOrig) { |
2235 | 0 | const FactEntry *EntryFact = &FactMan[Fact]; |
2236 | 0 | const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact); |
2237 | |
|
2238 | 0 | if (!ExitFact) { |
2239 | 0 | if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations) |
2240 | 0 | EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc, |
2241 | 0 | ExitLEK, Handler); |
2242 | 0 | if (ExitLEK == LEK_LockedSomePredecessors) |
2243 | 0 | EntrySet.removeLock(FactMan, *EntryFact); |
2244 | 0 | } |
2245 | 0 | } |
2246 | 0 | } |
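
A sketch of the join-point warning this intersection produces (illustrative, not from this file): a lock held on only one of two merging branches is flagged and dropped from the resulting entry set.

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

Mutex mu;

void oneBranchOnly(bool b) {
  if (b)
    mu.Lock();
  // Join point: warns that mu is held on some but not all predecessors;
  // the entry lockset below is the intersection, i.e. empty.
  if (b)
    mu.Unlock(); // also warns: releasing mu, which is not (provably) held
}
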
2247 | | |
2248 | | // Return true if block B never continues to its successors. |
2249 | 0 | static bool neverReturns(const CFGBlock *B) { |
2250 | 0 | if (B->hasNoReturnElement()) |
2251 | 0 | return true; |
2252 | 0 | if (B->empty()) |
2253 | 0 | return false; |
2254 | | |
2255 | 0 | CFGElement Last = B->back(); |
2256 | 0 | if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) { |
2257 | 0 | if (isa<CXXThrowExpr>(S->getStmt())) |
2258 | 0 | return true; |
2259 | 0 | } |
2260 | 0 | return false; |
2261 | 0 | } |
2262 | | |
2263 | | /// Check a function's CFG for thread-safety violations. |
2264 | | /// |
2265 | | /// We traverse the blocks in the CFG, compute the set of mutexes that are held |
2266 | | /// at the end of each block, and issue warnings for thread safety violations. |
2267 | | /// Each block in the CFG is traversed exactly once. |
2268 | 0 | void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) { |
2269 | | // TODO: this whole function needs to be rewritten as a visitor for CFGWalker.
2270 | | // For now, we just use the walker to set things up. |
2271 | 0 | threadSafety::CFGWalker walker; |
2272 | 0 | if (!walker.init(AC)) |
2273 | 0 | return; |
2274 | | |
2275 | | // AC.dumpCFG(true); |
2276 | | // threadSafety::printSCFG(walker); |
2277 | | |
2278 | 0 | CFG *CFGraph = walker.getGraph(); |
2279 | 0 | const NamedDecl *D = walker.getDecl(); |
2280 | 0 | CurrentFunction = dyn_cast<FunctionDecl>(D); |
2281 | 0 |
2282 | 0 | if (D->hasAttr<NoThreadSafetyAnalysisAttr>()) |
2283 | 0 | return; |
2284 | | |
2285 | | // FIXME: Do something a bit more intelligent inside constructor and |
2286 | | // destructor code. Constructors and destructors must assume unique access |
2287 | | // to 'this', so checks on member variable access are disabled, but we should
2288 | | // still enable checks on other objects. |
2289 | 0 | if (isa<CXXConstructorDecl>(D)) |
2290 | 0 | return; // Don't check inside constructors. |
2291 | 0 | if (isa<CXXDestructorDecl>(D)) |
2292 | 0 | return; // Don't check inside destructors. |
2293 | | |
2294 | 0 | Handler.enterFunction(CurrentFunction); |
2295 | 0 |
2296 | 0 | BlockInfo.resize(CFGraph->getNumBlockIDs(), |
2297 | 0 | CFGBlockInfo::getEmptyBlockInfo(LocalVarMap)); |
2298 | | |
2299 | | // We need to explore the CFG via a "topological" ordering. |
2300 | | // That way, we will be guaranteed to have information about required |
2301 | | // predecessor locksets when exploring a new block. |
2302 | 0 | const PostOrderCFGView *SortedGraph = walker.getSortedGraph(); |
2303 | 0 | PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph); |
2304 | 0 |
2305 | 0 | CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()]; |
2306 | 0 | CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()]; |
2307 | | |
2308 | | // Mark entry block as reachable |
2309 | 0 | Initial.Reachable = true; |
2310 | | |
2311 | | // Compute SSA names for local variables |
2312 | 0 | LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo); |
2313 | | |
2314 | | // Fill in source locations for all CFGBlocks. |
2315 | 0 | findBlockLocations(CFGraph, SortedGraph, BlockInfo); |
2316 | 0 |
2317 | 0 | CapExprSet ExclusiveLocksAcquired; |
2318 | 0 | CapExprSet SharedLocksAcquired; |
2319 | 0 | CapExprSet LocksReleased; |
2320 | | |
2321 | | // Add locks from exclusive_locks_required and shared_locks_required |
2322 | | // to initial lockset. Also turn off checking for lock and unlock functions. |
2323 | | // FIXME: is there a more intelligent way to check lock/unlock functions? |
2324 | 0 | if (!SortedGraph->empty() && D->hasAttrs()) { |
2325 | 0 | assert(*SortedGraph->begin() == &CFGraph->getEntry()); |
2326 | 0 | FactSet &InitialLockset = Initial.EntrySet; |
2327 | 0 |
2328 | 0 | CapExprSet ExclusiveLocksToAdd; |
2329 | 0 | CapExprSet SharedLocksToAdd; |
2330 | 0 |
2331 | 0 | SourceLocation Loc = D->getLocation(); |
2332 | 0 | for (const auto *Attr : D->attrs()) { |
2333 | 0 | Loc = Attr->getLocation(); |
2334 | 0 | if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) { |
2335 | 0 | getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, |
2336 | 0 | nullptr, D); |
2337 | 0 | } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) { |
2338 | | // UNLOCK_FUNCTION() is used to hide the underlying lock implementation. |
2339 | | // We must ignore such methods. |
2340 | 0 | if (A->args_size() == 0) |
2341 | 0 | return; |
2342 | 0 | getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, |
2343 | 0 | nullptr, D); |
2344 | 0 | getMutexIDs(LocksReleased, A, nullptr, D); |
2345 | 0 | } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) { |
2346 | 0 | if (A->args_size() == 0) |
2347 | 0 | return; |
2348 | 0 | getMutexIDs(A->isShared() ? SharedLocksAcquired |
2349 | 0 | : ExclusiveLocksAcquired, |
2350 | 0 | A, nullptr, D); |
2351 | 0 | } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) { |
2352 | | // Don't try to check trylock functions for now. |
2353 | 0 | return; |
2354 | 0 | } else if (isa<SharedTrylockFunctionAttr>(Attr)) { |
2355 | | // Don't try to check trylock functions for now. |
2356 | 0 | return; |
2357 | 0 | } else if (isa<TryAcquireCapabilityAttr>(Attr)) { |
2358 | | // Don't try to check trylock functions for now. |
2359 | 0 | return; |
2360 | 0 | } |
2361 | 0 | } |
2362 | | |
2363 | | // FIXME -- Loc can be wrong here. |
2364 | 0 | for (const auto &Mu : ExclusiveLocksToAdd) { |
2365 | 0 | auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc, |
2366 | 0 | FactEntry::Declared); |
2367 | 0 | addLock(InitialLockset, std::move(Entry), true); |
2368 | 0 | } |
2369 | 0 | for (const auto &Mu : SharedLocksToAdd) { |
2370 | 0 | auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc, |
2371 | 0 | FactEntry::Declared); |
2372 | 0 | addLock(InitialLockset, std::move(Entry), true); |
2373 | 0 | } |
2374 | 0 | } |
2375 | | |
2376 | | // Compute the expected exit set. |
2377 | | // By default, we expect all locks held on entry to be held on exit. |
2378 | 0 | FactSet ExpectedFunctionExitSet = Initial.EntrySet; |
2379 | | |
2380 | | // Adjust the expected exit set by adding or removing locks, as declared |
2381 | | // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then |
2382 | | // issue the appropriate warning. |
2383 | | // FIXME: the location here is not quite right. |
2384 | 0 | for (const auto &Lock : ExclusiveLocksAcquired) |
2385 | 0 | ExpectedFunctionExitSet.addLock( |
2386 | 0 | FactMan, std::make_unique<LockableFactEntry>(Lock, LK_Exclusive, |
2387 | 0 | D->getLocation())); |
2388 | 0 | for (const auto &Lock : SharedLocksAcquired) |
2389 | 0 | ExpectedFunctionExitSet.addLock( |
2390 | 0 | FactMan, |
2391 | 0 | std::make_unique<LockableFactEntry>(Lock, LK_Shared, D->getLocation())); |
2392 | 0 | for (const auto &Lock : LocksReleased) |
2393 | 0 | ExpectedFunctionExitSet.removeLock(FactMan, Lock); |
2394 | 0 |
2395 | 0 | for (const auto *CurrBlock : *SortedGraph) { |
2396 | 0 | unsigned CurrBlockID = CurrBlock->getBlockID(); |
2397 | 0 | CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID]; |
2398 | | |
2399 | | // Use the default initial lockset in case there are no predecessors. |
2400 | 0 | VisitedBlocks.insert(CurrBlock); |
2401 | | |
2402 | | // Iterate through the predecessor blocks and warn if the lockset for all |
2403 | | // predecessors is not the same. We take the entry lockset of the current |
2404 | | // block to be the intersection of all previous locksets. |
2405 | | // FIXME: By keeping the intersection, we may output more errors in future |
2406 | | // for a lock which is not in the intersection, but was in the union. We |
2407 | | // may want to also keep the union in future. As an example, let's say |
2408 | | // the intersection contains Mutex L, and the union contains L and M. |
2409 | | // Later we unlock M. At this point, we would output an error because we |
2410 | | // never locked M; although the real error is probably that we forgot to |
2411 | | // lock M on all code paths. Conversely, let's say that later we lock M. |
2412 | | // In this case, we should compare against the intersection instead of the |
2413 | | // union because the real error is probably that we forgot to unlock M on |
2414 | | // all code paths. |
2415 | 0 | bool LocksetInitialized = false; |
2416 | 0 | for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(), |
2417 | 0 | PE = CurrBlock->pred_end(); PI != PE; ++PI) { |
2418 | | // if *PI -> CurrBlock is a back edge |
2419 | 0 | if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) |
2420 | 0 | continue; |
2421 | | |
2422 | 0 | unsigned PrevBlockID = (*PI)->getBlockID(); |
2423 | 0 | CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID]; |
2424 | | |
2425 | | // Ignore edges from blocks that can't return. |
2426 | 0 | if (neverReturns(*PI) || !PrevBlockInfo->Reachable) |
2427 | 0 | continue; |
2428 | | |
2429 | | // Okay, we can reach this block from the entry. |
2430 | 0 | CurrBlockInfo->Reachable = true; |
2431 | 0 |
2432 | 0 | FactSet PrevLockset; |
2433 | 0 | getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock); |
2434 | 0 |
2435 | 0 | if (!LocksetInitialized) { |
2436 | 0 | CurrBlockInfo->EntrySet = PrevLockset; |
2437 | 0 | LocksetInitialized = true; |
2438 | 0 | } else { |
2439 | | // Surprisingly 'continue' doesn't always produce back edges, because |
2440 | | // the CFG has empty "transition" blocks where they meet with the end |
2441 | | // of the regular loop body. We still want to diagnose them as loops.
2442 | 0 | intersectAndWarn( |
2443 | 0 | CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc, |
2444 | 0 | isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt()) |
2445 | 0 | ? LEK_LockedSomeLoopIterations |
2446 | 0 | : LEK_LockedSomePredecessors); |
2447 | 0 | } |
2448 | 0 | } |
2449 | | |
2450 | | // Skip rest of block if it's not reachable. |
2451 | 0 | if (!CurrBlockInfo->Reachable) |
2452 | 0 | continue; |
2453 | | |
2454 | 0 | BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet); |
2455 | | |
2456 | | // Visit all the statements in the basic block. |
2457 | 0 | for (const auto &BI : *CurrBlock) { |
2458 | 0 | switch (BI.getKind()) { |
2459 | 0 | case CFGElement::Statement: { |
2460 | 0 | CFGStmt CS = BI.castAs<CFGStmt>(); |
2461 | 0 | LocksetBuilder.Visit(CS.getStmt()); |
2462 | 0 | break; |
2463 | 0 | } |
2464 | | // Ignore BaseDtor and MemberDtor for now. |
2465 | 0 | case CFGElement::AutomaticObjectDtor: { |
2466 | 0 | CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>(); |
2467 | 0 | const auto *DD = AD.getDestructorDecl(AC.getASTContext()); |
2468 | 0 | if (!DD->hasAttrs()) |
2469 | 0 | break; |
2470 | | |
2471 | 0 | LocksetBuilder.handleCall(nullptr, DD, |
2472 | 0 | SxBuilder.createVariable(AD.getVarDecl()), |
2473 | 0 | AD.getTriggerStmt()->getEndLoc()); |
2474 | 0 | break; |
2475 | 0 | } |
2476 | | |
2477 | 0 | case CFGElement::CleanupFunction: { |
2478 | 0 | const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>(); |
2479 | 0 | LocksetBuilder.handleCall(/*Exp=*/nullptr, CF.getFunctionDecl(), |
2480 | 0 | SxBuilder.createVariable(CF.getVarDecl()), |
2481 | 0 | CF.getVarDecl()->getLocation()); |
2482 | 0 | break; |
2483 | 0 | } |
2484 | | |
2485 | 0 | case CFGElement::TemporaryDtor: { |
2486 | 0 | auto TD = BI.castAs<CFGTemporaryDtor>(); |
2487 | | |
2488 | | // Clean up the constructed object even if there are no attributes, to
2489 | | // keep the number of objects in limbo as small as possible.
2490 | 0 | if (auto Object = ConstructedObjects.find( |
2491 | 0 | TD.getBindTemporaryExpr()->getSubExpr()); |
2492 | 0 | Object != ConstructedObjects.end()) { |
2493 | 0 | const auto *DD = TD.getDestructorDecl(AC.getASTContext()); |
2494 | 0 | if (DD->hasAttrs()) |
2495 | | // TODO: the location here isn't quite correct. |
2496 | 0 | LocksetBuilder.handleCall(nullptr, DD, Object->second, |
2497 | 0 | TD.getBindTemporaryExpr()->getEndLoc()); |
2498 | 0 | ConstructedObjects.erase(Object); |
2499 | 0 | } |
2500 | 0 | break; |
2501 | 0 | } |
2502 | 0 | default: |
2503 | 0 | break; |
2504 | 0 | } |
2505 | 0 | } |
2506 | 0 | CurrBlockInfo->ExitSet = LocksetBuilder.FSet; |
2507 | | |
2508 | | // For every back edge from CurrBlock (the end of the loop) to another block |
2509 | | // (FirstLoopBlock), we need to check that the lockset of CurrBlock is equal to
2510 | | // the one held at the beginning of FirstLoopBlock. We can look up the |
2511 | | // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map. |
2512 | 0 | for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(), |
2513 | 0 | SE = CurrBlock->succ_end(); SI != SE; ++SI) { |
2514 | | // if CurrBlock -> *SI is *not* a back edge |
2515 | 0 | if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI)) |
2516 | 0 | continue; |
2517 | | |
2518 | 0 | CFGBlock *FirstLoopBlock = *SI; |
2519 | 0 | CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()]; |
2520 | 0 | CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID]; |
2521 | 0 | intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc, |
2522 | 0 | LEK_LockedSomeLoopIterations); |
2523 | 0 | } |
2524 | 0 | } |
2525 | | |
2526 | | // Skip the final check if the exit block is unreachable. |
2527 | 0 | if (!Final.Reachable) |
2528 | 0 | return; |
2529 | | |
2530 | | // FIXME: Should we call this function for all blocks which exit the function? |
2531 | 0 | intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc, |
2532 | 0 | LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction); |
2533 | 0 |
2534 | 0 | Handler.leaveFunction(CurrentFunction); |
2535 | 0 | } |
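
A loop sketch for the back-edge comparison performed above (illustrative, not from this file): the lockset carried by the back edge must match the entry lockset of the loop head, otherwise the lock is only held for some loop iterations (and, here, it is also still held at function exit).

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

Mutex mu;

void loop(int n) {
  for (int i = 0; i < n; ++i) {
    mu.Lock(); // the back edge carries {mu}, but the loop head's entry set
  }            // is empty: warns that mu is held in some loop iterations only
}
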
2536 | | |
2537 | | /// Check a function's CFG for thread-safety violations. |
2538 | | /// |
2539 | | /// We traverse the blocks in the CFG, compute the set of mutexes that are held |
2540 | | /// at the end of each block, and issue warnings for thread safety violations. |
2541 | | /// Each block in the CFG is traversed exactly once. |
2542 | | void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC, |
2543 | | ThreadSafetyHandler &Handler, |
2544 | 0 | BeforeSet **BSet) { |
2545 | 0 | if (!*BSet) |
2546 | 0 | *BSet = new BeforeSet; |
2547 | 0 | ThreadSafetyAnalyzer Analyzer(Handler, *BSet); |
2548 | 0 | Analyzer.runAnalysis(AC); |
2549 | 0 | } |
2550 | | |
2551 | 46 | void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; } |
2552 | | |
2553 | | /// Helper function that returns a LockKind required for the given level |
2554 | | /// of access. |
2555 | 0 | LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) { |
2556 | 0 | switch (AK) { |
2557 | 0 | case AK_Read : |
2558 | 0 | return LK_Shared; |
2559 | 0 | case AK_Written : |
2560 | 0 | return LK_Exclusive; |
2561 | 0 | } |
2562 | 0 | llvm_unreachable("Unknown AccessKind"); |
2563 | 0 | } |
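
This mapping is what lets a reader get by with a shared hold while a writer needs an exclusive one; a closing sketch with a shared capability (illustrative names, not from this file):

struct __attribute__((shared_capability("mutex"))) SharedMutex {
  void ReaderLock() __attribute__((acquire_shared_capability()));
  void ReaderUnlock() __attribute__((release_shared_capability()));
};

SharedMutex mu;
int data __attribute__((guarded_by(mu)));

int read() {
  mu.ReaderLock();
  int v = data; // AK_Read -> LK_Shared: a shared hold suffices
  mu.ReaderUnlock();
  return v;
}

void write() {
  mu.ReaderLock();
  data = 42; // AK_Written -> LK_Exclusive: warns, only a shared hold exists
  mu.ReaderUnlock();
}
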