Coverage Report

Created: 2025-07-18 06:51

/src/libtorrent/src/disk_job_fence.cpp
Line | Count | Source
   1 |       | /*
   2 |       |
   3 |       | Copyright (c) 2003, Daniel Wallin
   4 |       | Copyright (c) 2017, 2020, Alden Torres
   5 |       | Copyright (c) 2016-2020, 2022, Arvid Norberg
   6 |       | All rights reserved.
   7 |       |
   8 |       | Redistribution and use in source and binary forms, with or without
   9 |       | modification, are permitted provided that the following conditions
  10 |       | are met:
  11 |       |
  12 |       |     * Redistributions of source code must retain the above copyright
  13 |       |       notice, this list of conditions and the following disclaimer.
  14 |       |     * Redistributions in binary form must reproduce the above copyright
  15 |       |       notice, this list of conditions and the following disclaimer in
  16 |       |       the documentation and/or other materials provided with the distribution.
  17 |       |     * Neither the name of the author nor the names of its
  18 |       |       contributors may be used to endorse or promote products derived
  19 |       |       from this software without specific prior written permission.
  20 |       |
  21 |       | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  22 |       | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  23 |       | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  24 |       | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  25 |       | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  26 |       | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  27 |       | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  28 |       | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  29 |       | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  30 |       | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  31 |       | POSSIBILITY OF SUCH DAMAGE.
  32 |       |
  33 |       | */
  34 |       |
  35 |       |
  36 |       | #include "libtorrent/aux_/disk_job_fence.hpp"
  37 |       | #include "libtorrent/aux_/mmap_disk_job.hpp"
  38 |       | #include "libtorrent/performance_counters.hpp"
  39 |       |
  40 |       | #define DEBUG_STORAGE 0
  41 |       |
  42 |       | #if DEBUG_STORAGE
  43 |       | #define DLOG(...) std::fprintf(__VA_ARGS__)
  44 |       | #else
  45 |     4 | #define DLOG(...) do {} while (false)
  46 |       | #endif
  47 |       |
  48 |       | namespace libtorrent {
  49 |       | namespace aux {
  50 |       |
  51 |       |   int disk_job_fence::job_complete(mmap_disk_job* j, tailqueue<mmap_disk_job>& jobs)
  52 |     2 |   {
  53 |     2 |     std::lock_guard<std::mutex> l(m_mutex);
  54 |       |
  55 |     2 |     TORRENT_ASSERT(j->flags & mmap_disk_job::in_progress);
  56 |     2 |     j->flags &= ~mmap_disk_job::in_progress;
  57 |       |
  58 |     2 |     TORRENT_ASSERT(m_outstanding_jobs > 0);
  59 |     2 |     --m_outstanding_jobs;
  60 |     2 |     if (j->flags & mmap_disk_job::fence)
  61 |     2 |     {
  62 |       |       // a fence job just completed. Make sure the fence logic
  63 |       |       // works by asserting m_outstanding_jobs is in fact 0 now
  64 |     2 |       TORRENT_ASSERT(m_outstanding_jobs == 0);
  65 |       |
  66 |       |       // the fence can now be lowered
  67 |     2 |       --m_has_fence;
  68 |       |
  69 |       |       // now we need to post all jobs that have been queued up
  70 |       |       // while this fence was up. However, if there's another fence
  71 |       |       // in the queue, stop there and raise the fence again
  72 |     2 |       int ret = 0;
  73 |     2 |       while (!m_blocked_jobs.empty())
  74 |     0 |       {
  75 |     0 |         mmap_disk_job *bj = m_blocked_jobs.pop_front();
  76 |     0 |         if (bj->flags & mmap_disk_job::fence)
  77 |     0 |         {
  78 |       |           // we encountered another fence. We cannot post anymore
  79 |       |           // jobs from the blocked jobs queue. We have to go back
  80 |       |           // into a raised fence mode and wait for all current jobs
  81 |       |           // to complete. The exception is that if there are no jobs
  82 |       |           // executing currently, we should add the fence job.
  83 |     0 |           if (m_outstanding_jobs == 0 && jobs.empty())
  84 |     0 |           {
  85 |     0 |             TORRENT_ASSERT(!(bj->flags & mmap_disk_job::in_progress));
  86 |     0 |             bj->flags |= mmap_disk_job::in_progress;
  87 |     0 |             ++m_outstanding_jobs;
  88 |     0 |             ++ret;
  89 |     0 | #if TORRENT_USE_ASSERTS
  90 |     0 |             TORRENT_ASSERT(bj->blocked);
  91 |     0 |             bj->blocked = false;
  92 |     0 | #endif
  93 |     0 |             jobs.push_back(bj);
  94 |     0 |           }
  95 |     0 |           else
  96 |     0 |           {
  97 |       |             // put the fence job back in the blocked queue
  98 |     0 |             m_blocked_jobs.push_front(bj);
  99 |     0 |           }
 100 |     0 |           TORRENT_ASSERT(m_has_fence > 0 || m_blocked_jobs.size() == 0);
 101 |     0 |           return ret;
 102 |     0 |         }
 103 |     0 |         TORRENT_ASSERT(!(bj->flags & mmap_disk_job::in_progress));
 104 |     0 |         bj->flags |= mmap_disk_job::in_progress;
 105 |       |
 106 |     0 |         ++m_outstanding_jobs;
 107 |     0 |         ++ret;
 108 |     0 | #if TORRENT_USE_ASSERTS
 109 |     0 |         TORRENT_ASSERT(bj->blocked);
 110 |     0 |         bj->blocked = false;
 111 |     0 | #endif
 112 |     0 |         jobs.push_back(bj);
 113 |     0 |       }
 114 |     2 |       return ret;
 115 |     2 |     }
 116 |       |
 117 |       |     // there are still outstanding jobs, even if we have a
 118 |       |     // fence, it's not time to lower it yet
 119 |       |     // also, if we don't have a fence, we're done
 120 |     0 |     if (m_outstanding_jobs > 0 || m_has_fence == 0) return 0;
 121 |       |
 122 |       |     // there's a fence raised, and no outstanding operations.
 123 |       |     // it means we can execute the fence job right now.
 124 |     0 |     TORRENT_ASSERT(m_blocked_jobs.size() > 0);
 125 |       |
 126 |       |     // this is the fence job
 127 |     0 |     mmap_disk_job *bj = m_blocked_jobs.pop_front();
 128 |     0 |     TORRENT_ASSERT(bj->flags & mmap_disk_job::fence);
 129 |       |
 130 |     0 |     TORRENT_ASSERT(!(bj->flags & mmap_disk_job::in_progress));
 131 |     0 |     bj->flags |= mmap_disk_job::in_progress;
 132 |       |
 133 |     0 |     ++m_outstanding_jobs;
 134 |     0 | #if TORRENT_USE_ASSERTS
 135 |     0 |     TORRENT_ASSERT(bj->blocked);
 136 |     0 |     bj->blocked = false;
 137 |     0 | #endif
 138 |       |     // prioritize fence jobs since they're blocking other jobs
 139 |     0 |     jobs.push_front(bj);
 140 |     0 |     return 1;
 141 |     0 |   }
 142 |       |
 143 |       |   bool disk_job_fence::is_blocked(mmap_disk_job* j)
 144 |     0 |   {
 145 |     0 |     std::lock_guard<std::mutex> l(m_mutex);
 146 |     0 |     DLOG(stderr, "[%p] is_blocked: fence: %d num_outstanding: %d\n"
 147 |     0 |       , static_cast<void*>(this), m_has_fence, int(m_outstanding_jobs));
 148 |       |
 149 |       |     // if this is the job that raised the fence, don't block it
 150 |       |     // ignore fence can only ignore one fence. If there are several,
 151 |       |     // this job still needs to get queued up
 152 |     0 |     if (m_has_fence == 0)
 153 |     0 |     {
 154 |     0 |       TORRENT_ASSERT(!(j->flags & mmap_disk_job::in_progress));
 155 |     0 |       j->flags |= mmap_disk_job::in_progress;
 156 |     0 |       ++m_outstanding_jobs;
 157 |     0 |       return false;
 158 |     0 |     }
 159 |       |
 160 |     0 |     m_blocked_jobs.push_back(j);
 161 |       |
 162 |     0 | #if TORRENT_USE_ASSERTS
 163 |     0 |     TORRENT_ASSERT(j->blocked == false);
 164 |     0 |     j->blocked = true;
 165 |     0 | #endif
 166 |       |
 167 |     0 |     return true;
 168 |     0 |   }
 169 |       |
 170 |       |   bool disk_job_fence::has_fence() const
 171 |     0 |   {
 172 |     0 |     std::lock_guard<std::mutex> l(m_mutex);
 173 |     0 |     return m_has_fence != 0;
 174 |     0 |   }
 175 |       |
 176 |       |   int disk_job_fence::num_blocked() const
 177 |     0 |   {
 178 |     0 |     std::lock_guard<std::mutex> l(m_mutex);
 179 |     0 |     return m_blocked_jobs.size();
 180 |     0 |   }
 181 |       |
 182 |       |   // j is the fence job. It must have exclusive access to the storage
 183 |       |   int disk_job_fence::raise_fence(mmap_disk_job* j, counters& cnt)
 184 |     2 |   {
 185 |     2 |     TORRENT_ASSERT(!(j->flags & mmap_disk_job::in_progress));
 186 |     2 |     TORRENT_ASSERT(!(j->flags & mmap_disk_job::fence));
 187 |     2 |     j->flags |= mmap_disk_job::fence;
 188 |       |
 189 |     2 |     std::lock_guard<std::mutex> l(m_mutex);
 190 |       |
 191 |     2 |     DLOG(stderr, "[%p] raise_fence: fence: %d num_outstanding: %d\n"
 192 |     2 |       , static_cast<void*>(this), m_has_fence, int(m_outstanding_jobs));
 193 |       |
 194 |     2 |     if (m_has_fence == 0 && m_outstanding_jobs == 0)
 195 |     2 |     {
 196 |     2 |       ++m_has_fence;
 197 |     2 |       DLOG(stderr, "[%p] raise_fence: need posting\n"
 198 |     2 |         , static_cast<void*>(this));
 199 |       |
 200 |       |       // the job j is expected to be put on the job queue
 201 |       |       // after this, without being passed through is_blocked()
 202 |       |       // that's why we're accounting for it here
 203 |       |
 204 |     2 |       j->flags |= mmap_disk_job::in_progress;
 205 |     2 |       ++m_outstanding_jobs;
 206 |     2 |       return fence_post_fence;
 207 |     2 |     }
 208 |       |
 209 |     0 |     ++m_has_fence;
 210 |     0 | #if TORRENT_USE_ASSERTS
 211 |     0 |     TORRENT_ASSERT(j->blocked == false);
 212 |     0 |     j->blocked = true;
 213 |     0 | #endif
 214 |     0 |     m_blocked_jobs.push_back(j);
 215 |     0 |     cnt.inc_stats_counter(counters::blocked_disk_jobs);
 216 |       |
 217 |     0 |     return fence_post_none;
 218 |     2 |   }
 219 |       |
 220 |       | }
 221 |       | }
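
Note on the covered paths: the counts above show that the tests hit only the raise_fence()/job_complete() sequence in which the fence is raised while nothing else is outstanding (lines 184-207 and 52-73, 114-115, 218 have a count of 2); is_blocked(), the loop that drains m_blocked_jobs, and the non-fence tail of job_complete() are all at 0. The following is a minimal, self-contained sketch of that fence pattern, intended only to illustrate the call sequence the report covers. The names used here (job, job_fence, raise, submit, complete) are simplified stand-ins, not libtorrent's real aux::disk_job_fence interface.

// Minimal, self-contained sketch of the fence pattern that the covered
// lines exercise. All names are hypothetical stand-ins for illustration.
#include <cassert>
#include <cstdio>
#include <deque>
#include <mutex>

struct job
{
  bool is_fence = false;
  bool in_progress = false;
};

class job_fence
{
public:
  // like raise_fence(): returns true when nothing is outstanding and the
  // fence job can be posted right away, false when it has to wait
  bool raise(job* j)
  {
    std::lock_guard<std::mutex> l(m_mutex);
    j->is_fence = true;
    ++m_has_fence;
    if (m_has_fence == 1 && m_outstanding == 0)
    {
      j->in_progress = true;
      ++m_outstanding;
      return true;
    }
    m_blocked.push_back(j);
    return false;
  }

  // like is_blocked(): a job submitted while a fence is up gets parked
  bool submit(job* j)
  {
    std::lock_guard<std::mutex> l(m_mutex);
    if (m_has_fence == 0)
    {
      j->in_progress = true;
      ++m_outstanding;
      return false; // runs immediately
    }
    m_blocked.push_back(j);
    return true; // queued behind the fence
  }

  // like job_complete(): completing a fence job lowers the fence and
  // releases the jobs parked behind it, stopping at the next fence
  int complete(job* j, std::deque<job*>& run_queue)
  {
    std::lock_guard<std::mutex> l(m_mutex);
    assert(j->in_progress && m_outstanding > 0);
    j->in_progress = false;
    --m_outstanding;
    if (!j->is_fence) return 0;
    assert(m_outstanding == 0);
    --m_has_fence;
    int released = 0;
    while (!m_blocked.empty() && !m_blocked.front()->is_fence)
    {
      job* bj = m_blocked.front();
      m_blocked.pop_front();
      bj->in_progress = true;
      ++m_outstanding;
      ++released;
      run_queue.push_back(bj);
    }
    return released;
  }

private:
  std::mutex m_mutex;
  int m_has_fence = 0;
  int m_outstanding = 0;
  std::deque<job*> m_blocked;
};

int main()
{
  job_fence f;
  job flush, read;
  std::deque<job*> run_queue;

  assert(f.raise(&flush));   // covered path: fence job posts immediately
  assert(f.submit(&read));   // arrives while the fence is up: parked
  int const released = f.complete(&flush, run_queue);
  std::printf("released %d blocked job(s)\n", released);
  assert(released == 1 && run_queue.front() == &read);
  return 0;
}

The real implementation differs in the details the sketch collapses: the fence job carries the mmap_disk_job::fence flag, raise_fence() reports fence_post_fence or fence_post_none to tell the caller whether to post the fence job immediately, blocked jobs are counted via counters::blocked_disk_jobs, and a second fence encountered while draining the blocked queue may itself be posted when nothing is outstanding.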