Coverage Report

Created: 2025-11-16 06:57

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/samba/source3/smbd/blocking.c
Line
Count
Source
1
/* 
2
   Unix SMB/CIFS implementation.
3
   Blocking Locking functions
4
   Copyright (C) Jeremy Allison 1998-2003
5
6
   This program is free software; you can redistribute it and/or modify
7
   it under the terms of the GNU General Public License as published by
8
   the Free Software Foundation; either version 3 of the License, or
9
   (at your option) any later version.
10
11
   This program is distributed in the hope that it will be useful,
12
   but WITHOUT ANY WARRANTY; without even the implied warranty of
13
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
   GNU General Public License for more details.
15
16
   You should have received a copy of the GNU General Public License
17
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
18
*/
19
20
#include "includes.h"
21
#include "locking/share_mode_lock.h"
22
#include "smbd/smbd.h"
23
#include "smbd/globals.h"
24
#include "messages.h"
25
#include "lib/util/tevent_ntstatus.h"
26
#include "lib/dbwrap/dbwrap_watch.h"
27
#include "librpc/gen_ndr/ndr_open_files.h"
28
29
#undef DBGC_CLASS
30
0
#define DBGC_CLASS DBGC_LOCKING
31
32
/*
 * Try to acquire every lock in state->locks on br_lck, in order.
 *
 * Returns NT_STATUS_OK with all locks held on success. On failure the
 * failing do_lock() status is returned, state->blocker_idx records the
 * index of the failing element, state->blocking_pid/blocking_smblctx
 * identify the conflicting holder (as filled in by do_lock()), and all
 * locks acquired so far are rolled back.
 */
NTSTATUS smbd_do_locks_try(struct byte_range_lock *br_lck,
			   struct smbd_do_locks_state *state)
{
	bool unlock_ok;
	uint16_t i;
	NTSTATUS status = NT_STATUS_OK;

	for (i = 0; i < state->num_locks; i++) {
		struct smbd_lock_element *e = &state->locks[i];

		status = do_lock(
			br_lck,
			state->locks, /* req_mem_ctx */
			&e->req_guid,
			e->smblctx,
			e->count,
			e->offset,
			e->brltype,
			e->lock_flav,
			&state->blocking_pid,
			&state->blocking_smblctx);
		if (!NT_STATUS_IS_OK(status)) {
			break;
		}
	}

	if (NT_STATUS_IS_OK(status)) {
		return NT_STATUS_OK;
	}

	/* i is the index of the element that failed. */
	state->blocker_idx = i;
	unlock_ok = true;

	/*
	 * Undo the locks we successfully got
	 *
	 * i is unsigned: the loop walks i-1 ... 0 and terminates when
	 * the decrement wraps around to UINT16_MAX.
	 */
	for (i = i-1; i != UINT16_MAX; i--) {
		struct smbd_lock_element *e = &state->locks[i];
		NTSTATUS ulstatus;

		ulstatus = do_unlock(br_lck,
				     e->smblctx,
				     e->count,
				     e->offset,
				     e->lock_flav);
		if (!NT_STATUS_IS_OK(ulstatus)) {
			DBG_DEBUG("Failed to undo lock flavour %s lock "
				  "type %s start=%"PRIu64" len=%"PRIu64" "
				  "requested for file [%s]\n",
				  lock_flav_name(e->lock_flav),
				  lock_type_name(e->brltype),
				  e->offset,
				  e->count,
				  fsp_str_dbg(brl_fsp(br_lck)));
			unlock_ok = false;
		}
	}
	if (unlock_ok) {
		/* Full rollback succeeded: no net change to persist. */
		brl_set_modified(br_lck, false);
	}

	return status;
}
95
96
static bool smbd_smb1_fsp_add_blocked_lock_req(
97
  struct files_struct *fsp, struct tevent_req *req)
98
0
{
99
0
  size_t num_reqs = talloc_array_length(fsp->blocked_smb1_lock_reqs);
100
0
  struct tevent_req **tmp = NULL;
101
102
0
  tmp = talloc_realloc(
103
0
    fsp,
104
0
    fsp->blocked_smb1_lock_reqs,
105
0
    struct tevent_req *,
106
0
    num_reqs+1);
107
0
  if (tmp == NULL) {
108
0
    return false;
109
0
  }
110
0
  fsp->blocked_smb1_lock_reqs = tmp;
111
0
  fsp->blocked_smb1_lock_reqs[num_reqs] = req;
112
0
  return true;
113
0
}
114
115
/*
 * Per-request state for one in-flight SMB1 blocking byte-range lock
 * request (driven by smbd_smb1_do_locks_send/_recv).
 */
struct smbd_smb1_do_locks_state {
	struct tevent_context *ev;	/* event context driving retries */
	struct smb_request *smbreq;	/* talloc_move()d into this state */
	struct files_struct *fsp;	/* file the locks apply to */
	uint32_t timeout;		/* client lock timeout in msecs */
	uint32_t polling_msecs;		/* current posix-lock polling interval */
	uint32_t retry_msecs;		/* current backend-retry interval */
	struct timeval endtime;		/* absolute deadline; zero until set */
	bool large_offset;	/* required for correct cancel */
	uint16_t num_locks;		/* number of elements in locks */
	struct smbd_lock_element *locks; /* the requested lock ranges */
	uint16_t blocker;		/* index of the lock that conflicted */
	NTSTATUS deny_status;		/* status to return when denied */
};
129
130
static void smbd_smb1_do_locks_try(struct tevent_req *req);
131
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq);
132
static void smbd_smb1_blocked_locks_cleanup(
133
  struct tevent_req *req, enum tevent_req_state req_state);
134
135
/*
 * Compute state->endtime (absolute deadline, relative to the smb
 * request's arrival time) exactly once per request.
 *
 * A zero client timeout normally means "don't retry", but two
 * heuristics may still force a short lp_lock_spin_time() wait:
 * the ancient-application offset range check and a repeated failure
 * on the same offset (fsp->lock_failure_offset).
 */
static void smbd_smb1_do_locks_setup_timeout(
	struct smbd_smb1_do_locks_state *state,
	const struct smbd_lock_element *blocker)
{
	struct files_struct *fsp = state->fsp;

	if (!timeval_is_zero(&state->endtime)) {
		/*
		 * already done
		 */
		return;
	}

	if ((state->timeout != 0) && (state->timeout != UINT32_MAX)) {
		/*
		 * Windows internal resolution for blocking locks
		 * seems to be about 200ms... Don't wait for less than
		 * that. JRA.
		 */
		state->timeout = MAX(state->timeout, lp_lock_spin_time());
	}

	if (state->timeout != 0) {
		goto set_endtime;
	}

	if (blocker == NULL) {
		goto set_endtime;
	}

	if ((blocker->offset >= 0xEF000000) &&
	    ((blocker->offset >> 63) == 0)) {
		/*
		 * This must be an optimization of an ancient
		 * application bug...
		 */
		state->timeout = lp_lock_spin_time();
	}

	if (fsp->fsp_flags.lock_failure_seen &&
	    (blocker->offset == fsp->lock_failure_offset)) {
		/*
		 * Delay repeated lock attempts on the same
		 * lock. Maybe a more advanced version of the
		 * above check?
		 */
		DBG_DEBUG("Delaying lock request due to previous "
			  "failure\n");
		state->timeout = lp_lock_spin_time();
	}

set_endtime:
	/*
	 * Note state->timeout might still be 0,
	 * but that's ok, as we don't want to retry
	 * in that case.
	 */
	state->endtime = timeval_add(&state->smbreq->request_time,
				     state->timeout / 1000,
				     (state->timeout % 1000) * 1000);
}
196
197
static void smbd_smb1_do_locks_update_retry_msecs(
198
  struct smbd_smb1_do_locks_state *state)
199
0
{
200
  /*
201
   * The default lp_lock_spin_time() is 200ms,
202
   * we just use half of it to trigger the first retry.
203
   *
204
   * v_min is in the range of 0.001 to 10 secs
205
   * (0.1 secs by default)
206
   *
207
   * v_max is in the range of 0.01 to 100 secs
208
   * (1.0 secs by default)
209
   *
210
   * The typical steps are:
211
   * 0.1, 0.2, 0.3, 0.4, ... 1.0
212
   */
213
0
  uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
214
0
  uint32_t v_max = 10 * v_min;
215
216
0
  if (state->retry_msecs >= v_max) {
217
0
    state->retry_msecs = v_max;
218
0
    return;
219
0
  }
220
221
0
  state->retry_msecs += v_min;
222
0
}
223
224
static void smbd_smb1_do_locks_update_polling_msecs(
225
  struct smbd_smb1_do_locks_state *state)
226
0
{
227
  /*
228
   * The default lp_lock_spin_time() is 200ms.
229
   *
230
   * v_min is in the range of 0.002 to 20 secs
231
   * (0.2 secs by default)
232
   *
233
   * v_max is in the range of 0.02 to 200 secs
234
   * (2.0 secs by default)
235
   *
236
   * The typical steps are:
237
   * 0.2, 0.4, 0.6, 0.8, ... 2.0
238
   */
239
0
  uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
240
0
  uint32_t v_max = 10 * v_min;
241
242
0
  if (state->polling_msecs >= v_max) {
243
0
    state->polling_msecs = v_max;
244
0
    return;
245
0
  }
246
247
0
  state->polling_msecs += v_min;
248
0
}
249
250
/*
 * Start an SMB1 blocking byte-range lock request.
 *
 * Ownership of *smbreq is talloc_move()d into the request state.
 * If the locks can be granted (or trivially denied) immediately the
 * request completes synchronously via tevent_req_post(); otherwise it
 * is appended to fsp->blocked_smb1_lock_reqs and retried when
 * locking.tdb changes or timers fire.
 *
 * Collect the result with smbd_smb1_do_locks_recv().
 */
struct tevent_req *smbd_smb1_do_locks_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct smb_request **smbreq, /* talloc_move()d into our state */
	struct files_struct *fsp,
	uint32_t lock_timeout,
	bool large_offset,
	uint16_t num_locks,
	struct smbd_lock_element *locks)
{
	struct tevent_req *req = NULL;
	struct smbd_smb1_do_locks_state *state = NULL;
	bool ok;

	req = tevent_req_create(
		mem_ctx, &state, struct smbd_smb1_do_locks_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->smbreq = talloc_move(state, smbreq);
	state->fsp = fsp;
	state->timeout = lock_timeout;
	state->large_offset = large_offset;
	state->num_locks = num_locks;
	state->locks = locks;
	state->deny_status = NT_STATUS_LOCK_NOT_GRANTED;

	DBG_DEBUG("state=%p, state->smbreq=%p\n", state, state->smbreq);

	if (num_locks == 0 || locks == NULL) {
		/* Nothing to do: succeed immediately. */
		DBG_DEBUG("no locks\n");
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	if (state->locks[0].lock_flav == POSIX_LOCK) {
		/*
		 * SMB1 posix locks always use
		 * NT_STATUS_FILE_LOCK_CONFLICT.
		 */
		state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;
	}

	/* First attempt; may finish the request synchronously. */
	smbd_smb1_do_locks_try(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	/* Still pending: register for wakeup/cleanup. */
	ok = smbd_smb1_fsp_add_blocked_lock_req(fsp, req);
	if (!ok) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_cleanup_fn(req, smbd_smb1_blocked_locks_cleanup);
	return req;
}
307
308
/*
 * tevent cleanup hook: remove req from fsp->blocked_smb1_lock_reqs
 * when the request leaves the pending state. The request must be in
 * the array (asserted); the array is shrunk in place afterwards.
 */
static void smbd_smb1_blocked_locks_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state)
{
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	struct files_struct *fsp = state->fsp;
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	size_t i;

	DBG_DEBUG("req=%p, state=%p, req_state=%d\n",
		  req,
		  state,
		  (int)req_state);

	if (req_state == TEVENT_REQ_RECEIVED) {
		/* Already removed on an earlier cleanup pass. */
		DBG_DEBUG("already received\n");
		return;
	}

	for (i=0; i<num_blocked; i++) {
		if (blocked[i] == req) {
			break;
		}
	}
	SMB_ASSERT(i<num_blocked);

	ARRAY_DEL_ELEMENT(blocked, i, num_blocked);

	/* Shrink by one; a failed realloc-to-smaller keeps the old array. */
	fsp->blocked_smb1_lock_reqs = talloc_realloc(
		fsp, blocked, struct tevent_req *, num_blocked-1);
}
340
341
static NTSTATUS smbd_smb1_do_locks_check_blocked(
342
  uint16_t num_blocked,
343
  struct smbd_lock_element *blocked,
344
  uint16_t num_locks,
345
  struct smbd_lock_element *locks,
346
  uint16_t *blocker_idx,
347
  uint64_t *blocking_smblctx)
348
0
{
349
0
  uint16_t li;
350
351
0
  for (li=0; li < num_locks; li++) {
352
0
    struct smbd_lock_element *l = &locks[li];
353
0
    uint16_t bi;
354
0
    bool valid;
355
356
0
    valid = byte_range_valid(l->offset, l->count);
357
0
    if (!valid) {
358
0
      return NT_STATUS_INVALID_LOCK_RANGE;
359
0
    }
360
361
0
    for (bi = 0; bi < num_blocked; bi++) {
362
0
      struct smbd_lock_element *b = &blocked[li];
363
0
      bool overlap;
364
365
      /* Read locks never conflict. */
366
0
      if (l->brltype == READ_LOCK && b->brltype == READ_LOCK) {
367
0
        continue;
368
0
      }
369
370
0
      overlap = byte_range_overlap(l->offset,
371
0
                 l->count,
372
0
                 b->offset,
373
0
                 b->count);
374
0
      if (!overlap) {
375
0
        continue;
376
0
      }
377
378
0
      *blocker_idx = li;
379
0
      *blocking_smblctx = b->smblctx;
380
0
      return NT_STATUS_LOCK_NOT_GRANTED;
381
0
    }
382
0
  }
383
384
0
  return NT_STATUS_OK;
385
0
}
386
387
/*
 * Callback run under the share-mode/brl lock by
 * share_mode_do_locked_brl(): attempt the locks, and on conflict
 * either finish the request or arrange a retry via
 * share_mode_watch_send() with an appropriate endtime.
 *
 * Older pending requests are checked first so they keep their queue
 * position. NT_STATUS_RETRY from the backend leads to waiting even
 * for non-blocking requests.
 */
static void smbd_smb1_do_locks_try_fn(struct share_mode_lock *lck,
				      struct byte_range_lock *br_lck,
				      void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	struct smbd_do_locks_state brl_state;
	struct files_struct *fsp = state->fsp;
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	struct timeval endtime = { 0 };
	struct tevent_req *subreq = NULL;
	size_t bi;
	NTSTATUS status;
	bool ok;
	bool expired;

	/*
	 * The caller has checked fsp->fsp_flags.can_lock and lp_locking so
	 * br_lck has to be there!
	 */
	SMB_ASSERT(br_lck != NULL);

	brl_state = (struct smbd_do_locks_state) {
		.num_locks = state->num_locks,
		.locks = state->locks,
	};

	/*
	 * We check the pending/blocked requests
	 * from the oldest to the youngest request.
	 *
	 * Note due to the retry logic the current request
	 * might already be in the list.
	 */

	for (bi = 0; bi < num_blocked; bi++) {
		struct smbd_smb1_do_locks_state *blocked_state =
			tevent_req_data(blocked[bi],
			struct smbd_smb1_do_locks_state);

		if (blocked_state->locks == state->locks) {
			SMB_ASSERT(blocked_state->num_locks == state->num_locks);

			/*
			 * We found ourself...
			 */
			break;
		}

		status = smbd_smb1_do_locks_check_blocked(
				blocked_state->num_locks,
				blocked_state->locks,
				state->num_locks,
				state->locks,
				&brl_state.blocker_idx,
				&brl_state.blocking_smblctx);
		if (!NT_STATUS_IS_OK(status)) {
			/* An older pending request blocks us: we block on
			 * ourselves, so report our own server id. */
			brl_state.blocking_pid = messaging_server_id(
				fsp->conn->sconn->msg_ctx);
			goto check_retry;
		}
	}

	status = smbd_do_locks_try(br_lck, &brl_state);
	if (NT_STATUS_IS_OK(status)) {
		goto done;
	}

	state->blocker = brl_state.blocker_idx;

	if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/*
		 * We got NT_STATUS_RETRY,
		 * we reset polling_msecs so that
		 * that the retries based on LOCK_NOT_GRANTED
		 * will later start with small intervals again.
		 */
		state->polling_msecs = 0;

		/*
		 * The backend wasn't able to decide yet.
		 * We need to wait even for non-blocking
		 * locks.
		 *
		 * The backend uses blocking_smblctx == UINT64_MAX
		 * to indicate that we should use retry timers.
		 *
		 * It uses blocking_smblctx == 0 to indicate
		 * it will use share_mode_wakeup_waiters()
		 * to wake us. Note that unrelated changes in
		 * locking.tdb may cause retries.
		 */

		if (brl_state.blocking_smblctx != UINT64_MAX) {
			SMB_ASSERT(brl_state.blocking_smblctx == 0);
			goto setup_retry;
		}

		smbd_smb1_do_locks_update_retry_msecs(state);

		DBG_DEBUG("Waiting for a backend decision. "
			  "Retry in %"PRIu32" msecs\n",
			  state->retry_msecs);

		/*
		 * We completely ignore state->endtime here
		 * as we'll wait for a backend decision forever.
		 * If the backend is smart enough to implement
		 * some NT_STATUS_RETRY logic, it has to
		 * switch to any other status after in order
		 * to avoid waiting forever.
		 */
		endtime = timeval_current_ofs_msec(state->retry_msecs);
		goto setup_retry;
	}

check_retry:
	if (!ERROR_WAS_LOCK_DENIED(status)) {
		goto done;
	}
	/*
	 * We got LOCK_NOT_GRANTED, make sure
	 * a following STATUS_RETRY will start
	 * with short intervals again.
	 */
	state->retry_msecs = 0;

	smbd_smb1_do_locks_setup_timeout(state, &state->locks[state->blocker]);
	DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
		  state->timeout,
		  brl_state.blocking_smblctx);

	/*
	 * The client specified timeout expired
	 * avoid further retries.
	 *
	 * Otherwise keep waiting either waiting
	 * for changes in locking.tdb or the polling
	 * mode timers waiting for posix locks.
	 *
	 * If the endtime is not expired yet,
	 * it means we'll retry after a timeout.
	 * In that case we'll have to return
	 * NT_STATUS_FILE_LOCK_CONFLICT
	 * instead of NT_STATUS_LOCK_NOT_GRANTED.
	 */
	expired = timeval_expired(&state->endtime);
	if (expired) {
		status = state->deny_status;
		goto done;
	}
	state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;

	endtime = state->endtime;

	if (brl_state.blocking_smblctx == UINT64_MAX) {
		struct timeval tmp;

		/* Posix lock holder can't wake us: fall back to polling. */
		smbd_smb1_do_locks_update_polling_msecs(state);

		DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
			  state->polling_msecs);

		tmp = timeval_current_ofs_msec(state->polling_msecs);
		endtime = timeval_min(&endtime, &tmp);
	}

setup_retry:
	subreq = share_mode_watch_send(
		state, state->ev, &state->fsp->file_id, brl_state.blocking_pid);
	if (tevent_req_nomem(subreq, req)) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}
	tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);

	if (timeval_is_zero(&endtime)) {
		/* No deadline: wait for a wakeup only. */
		return;
	}

	ok = tevent_req_set_endtime(subreq, state->ev, endtime);
	if (!ok) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}
	return;
done:
	smbd_smb1_brl_finish_by_req(req, status);
}
579
580
static void smbd_smb1_do_locks_try(struct tevent_req *req)
581
0
{
582
0
  struct smbd_smb1_do_locks_state *state = tevent_req_data(
583
0
    req, struct smbd_smb1_do_locks_state);
584
0
  NTSTATUS status;
585
586
0
  if (!state->fsp->fsp_flags.can_lock) {
587
0
    if (state->fsp->fsp_flags.is_directory) {
588
0
      return smbd_smb1_brl_finish_by_req(req,
589
0
          NT_STATUS_INVALID_DEVICE_REQUEST);
590
0
    }
591
0
    return smbd_smb1_brl_finish_by_req(req,
592
0
               NT_STATUS_INVALID_HANDLE);
593
0
  }
594
595
0
  if (!lp_locking(state->fsp->conn->params)) {
596
0
    return smbd_smb1_brl_finish_by_req(req, NT_STATUS_OK);
597
0
  }
598
599
0
  status = share_mode_do_locked_brl(state->fsp,
600
0
            smbd_smb1_do_locks_try_fn,
601
0
            req);
602
0
  if (!NT_STATUS_IS_OK(status)) {
603
0
    smbd_smb1_brl_finish_by_req(req, status);
604
0
    return;
605
0
  }
606
0
  return;
607
0
}
608
609
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq)
610
0
{
611
0
  struct tevent_req *req = tevent_req_callback_data(
612
0
    subreq, struct tevent_req);
613
0
  struct smbd_smb1_do_locks_state *state = tevent_req_data(
614
0
    req, struct smbd_smb1_do_locks_state);
615
0
  NTSTATUS status;
616
0
  bool ok;
617
618
  /*
619
   * Make sure we run as the user again
620
   */
621
0
  ok = change_to_user_and_service_by_fsp(state->fsp);
622
0
  if (!ok) {
623
0
    tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
624
0
    return;
625
0
  }
626
627
0
  status = share_mode_watch_recv(subreq, NULL, NULL);
628
0
  TALLOC_FREE(subreq);
629
630
0
  DBG_DEBUG("share_mode_watch_recv returned %s\n",
631
0
      nt_errstr(status));
632
633
  /*
634
   * We ignore any errors here, it's most likely
635
   * we just get NT_STATUS_OK or NT_STATUS_IO_TIMEOUT.
636
   *
637
   * In any case we can just give it a retry.
638
   */
639
640
0
  smbd_smb1_do_locks_try(req);
641
0
}
642
643
NTSTATUS smbd_smb1_do_locks_recv(struct tevent_req *req)
644
0
{
645
0
  struct smbd_smb1_do_locks_state *state = tevent_req_data(
646
0
    req, struct smbd_smb1_do_locks_state);
647
0
  NTSTATUS status = NT_STATUS_OK;
648
0
  bool err;
649
650
0
  err = tevent_req_is_nterror(req, &status);
651
652
0
  DBG_DEBUG("err=%d, status=%s\n", (int)err, nt_errstr(status));
653
654
0
  if (tevent_req_is_nterror(req, &status)) {
655
0
    struct files_struct *fsp = state->fsp;
656
0
    struct smbd_lock_element *blocker =
657
0
      &state->locks[state->blocker];
658
659
0
    DBG_DEBUG("Setting lock_failure_offset=%"PRIu64"\n",
660
0
        blocker->offset);
661
662
0
    fsp->fsp_flags.lock_failure_seen = true;
663
0
    fsp->lock_failure_offset = blocker->offset;
664
0
    return status;
665
0
  }
666
667
0
  tevent_req_received(req);
668
669
0
  return NT_STATUS_OK;
670
0
}
671
672
bool smbd_smb1_do_locks_extract_smbreq(
673
  struct tevent_req *req,
674
  TALLOC_CTX *mem_ctx,
675
  struct smb_request **psmbreq)
676
0
{
677
0
  struct smbd_smb1_do_locks_state *state = tevent_req_data(
678
0
    req, struct smbd_smb1_do_locks_state);
679
680
0
  DBG_DEBUG("req=%p, state=%p, state->smbreq=%p\n",
681
0
      req,
682
0
      state,
683
0
      state->smbreq);
684
685
0
  if (state->smbreq == NULL) {
686
0
    return false;
687
0
  }
688
0
  *psmbreq = talloc_move(mem_ctx, &state->smbreq);
689
0
  return true;
690
0
}
691
692
/*
 * Complete a blocking lock request with the given status:
 * NT_STATUS_OK maps to tevent_req_done(), anything else to
 * tevent_req_nterror().
 */
void smbd_smb1_brl_finish_by_req(struct tevent_req *req, NTSTATUS status)
{
	DBG_DEBUG("req=%p, status=%s\n", req, nt_errstr(status));

	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}
	tevent_req_done(req);
}
702
703
bool smbd_smb1_brl_finish_by_lock(
704
  struct files_struct *fsp,
705
  bool large_offset,
706
  struct smbd_lock_element lock,
707
  NTSTATUS finish_status)
708
0
{
709
0
  struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
710
0
  size_t num_blocked = talloc_array_length(blocked);
711
0
  size_t i;
712
713
0
  DBG_DEBUG("num_blocked=%zu\n", num_blocked);
714
715
0
  for (i=0; i<num_blocked; i++) {
716
0
    struct tevent_req *req = blocked[i];
717
0
    struct smbd_smb1_do_locks_state *state = tevent_req_data(
718
0
      req, struct smbd_smb1_do_locks_state);
719
0
    uint16_t j;
720
721
0
    DBG_DEBUG("i=%zu, req=%p\n", i, req);
722
723
0
    if (state->large_offset != large_offset) {
724
0
      continue;
725
0
    }
726
727
0
    for (j=0; j<state->num_locks; j++) {
728
0
      struct smbd_lock_element *l = &state->locks[j];
729
730
0
      if ((lock.smblctx == l->smblctx) &&
731
0
          (lock.offset == l->offset) &&
732
0
          (lock.count == l->count)) {
733
0
        smbd_smb1_brl_finish_by_req(
734
0
          req, finish_status);
735
0
        return true;
736
0
      }
737
0
    }
738
0
  }
739
0
  return false;
740
0
}
741
742
static struct files_struct *smbd_smb1_brl_finish_by_mid_fn(
743
  struct files_struct *fsp, void *private_data)
744
0
{
745
0
  struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
746
0
  size_t num_blocked = talloc_array_length(blocked);
747
0
  uint64_t mid = *((uint64_t *)private_data);
748
0
  size_t i;
749
750
0
  DBG_DEBUG("fsp=%p, num_blocked=%zu\n", fsp, num_blocked);
751
752
0
  for (i=0; i<num_blocked; i++) {
753
0
    struct tevent_req *req = blocked[i];
754
0
    struct smbd_smb1_do_locks_state *state = tevent_req_data(
755
0
      req, struct smbd_smb1_do_locks_state);
756
0
    struct smb_request *smbreq = state->smbreq;
757
758
0
    if (smbreq->mid == mid) {
759
0
      tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
760
0
      return fsp;
761
0
    }
762
0
  }
763
764
0
  return NULL;
765
0
}
766
767
/*
768
 * This walks the list of fsps, we store the blocked reqs attached to
769
 * them. It can be expensive, but this is legacy SMB1 and trying to
770
 * remember looking at traces I don't really see many of those calls.
771
 */
772
773
/*
 * Cancel the pending blocking lock request identified by mid, if any.
 * Returns true when a matching request was found and failed.
 */
bool smbd_smb1_brl_finish_by_mid(
	struct smbd_server_connection *sconn, uint64_t mid)
{
	struct files_struct *found;

	found = files_forall(sconn, smbd_smb1_brl_finish_by_mid_fn, &mid);
	return (found != NULL);
}