Coverage Report

Created: 2025-11-16 06:57

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/samba/source3/smbd/smb2_lock.c
Line
Count
Source
1
/*
2
   Unix SMB/CIFS implementation.
3
   Core SMB2 server
4
5
   Copyright (C) Stefan Metzmacher 2009
6
   Copyright (C) Jeremy Allison 2010
7
8
   This program is free software; you can redistribute it and/or modify
9
   it under the terms of the GNU General Public License as published by
10
   the Free Software Foundation; either version 3 of the License, or
11
   (at your option) any later version.
12
13
   This program is distributed in the hope that it will be useful,
14
   but WITHOUT ANY WARRANTY; without even the implied warranty of
15
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
   GNU General Public License for more details.
17
18
   You should have received a copy of the GNU General Public License
19
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
20
*/
21
22
#include "includes.h"
23
#include "locking/share_mode_lock.h"
24
#include "smbd/smbd.h"
25
#include "smbd/globals.h"
26
#include "../libcli/smb/smb_common.h"
27
#include "../lib/util/tevent_ntstatus.h"
28
#include "lib/dbwrap/dbwrap_watch.h"
29
#include "librpc/gen_ndr/open_files.h"
30
#include "messages.h"
31
32
#undef DBGC_CLASS
33
0
#define DBGC_CLASS DBGC_SMB2
34
35
struct smbd_smb2_lock_element {
36
  uint64_t offset;
37
  uint64_t length;
38
  uint32_t flags;
39
};
40
41
struct smbd_smb2_lock_state {
42
  struct tevent_context *ev;
43
  struct smbd_smb2_request *smb2req;
44
  struct smb_request *smb1req;
45
  struct files_struct *fsp;
46
  bool blocking;
47
  uint32_t polling_msecs;
48
  uint32_t retry_msecs;
49
  uint16_t lock_count;
50
  struct smbd_lock_element *locks;
51
  uint8_t lock_sequence_value;
52
  uint8_t *lock_sequence_element;
53
};
54
55
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
56
             struct tevent_context *ev,
57
             struct smbd_smb2_request *smb2req,
58
             struct files_struct *in_fsp,
59
             uint32_t in_lock_sequence,
60
             uint16_t in_lock_count,
61
             struct smbd_smb2_lock_element *in_locks);
62
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);
63
64
static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
65
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
66
0
{
67
0
  const uint8_t *inbody;
68
0
  uint16_t in_lock_count;
69
0
  uint32_t in_lock_sequence;
70
0
  uint64_t in_file_id_persistent;
71
0
  uint64_t in_file_id_volatile;
72
0
  struct files_struct *in_fsp;
73
0
  struct smbd_smb2_lock_element *in_locks;
74
0
  struct tevent_req *subreq;
75
0
  const uint8_t *lock_buffer;
76
0
  uint16_t l;
77
0
  NTSTATUS status;
78
79
0
  status = smbd_smb2_request_verify_sizes(req, 0x30);
80
0
  if (!NT_STATUS_IS_OK(status)) {
81
0
    return smbd_smb2_request_error(req, status);
82
0
  }
83
0
  inbody = SMBD_SMB2_IN_BODY_PTR(req);
84
85
0
  in_lock_count     = CVAL(inbody, 0x02);
86
0
  if (req->xconn->protocol >= PROTOCOL_SMB2_10) {
87
0
    in_lock_sequence  = IVAL(inbody, 0x04);
88
0
  } else {
89
    /* 0x04 - 4 bytes reserved */
90
0
    in_lock_sequence  = 0;
91
0
  }
92
0
  in_file_id_persistent   = BVAL(inbody, 0x08);
93
0
  in_file_id_volatile   = BVAL(inbody, 0x10);
94
95
0
  if (in_lock_count < 1) {
96
0
    return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
97
0
  }
98
99
0
  if (((in_lock_count - 1) * 0x18) > SMBD_SMB2_IN_DYN_LEN(req)) {
100
0
    return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
101
0
  }
102
103
0
  in_locks = talloc_array(req, struct smbd_smb2_lock_element,
104
0
        in_lock_count);
105
0
  if (in_locks == NULL) {
106
0
    return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
107
0
  }
108
109
0
  l = 0;
110
0
  lock_buffer = inbody + 0x18;
111
112
0
  in_locks[l].offset  = BVAL(lock_buffer, 0x00);
113
0
  in_locks[l].length  = BVAL(lock_buffer, 0x08);
114
0
  in_locks[l].flags = IVAL(lock_buffer, 0x10);
115
  /* 0x14 - 4 reserved bytes */
116
117
0
  status = req->session->status;
118
0
  if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED)) {
119
    /*
120
     * We need to catch NT_STATUS_NETWORK_SESSION_EXPIRED
121
     * for lock requests only.
122
     *
123
     * Unlock requests still need to be processed!
124
     *
125
     * This means smbd_smb2_request_check_session()
126
     * can't handle the difference and always
127
     * allows SMB2_OP_LOCK.
128
     */
129
0
    if (in_locks[0].flags != SMB2_LOCK_FLAG_UNLOCK) {
130
0
      return smbd_smb2_request_error(req, status);
131
0
    }
132
0
  }
133
134
0
  lock_buffer = SMBD_SMB2_IN_DYN_PTR(req);
135
136
0
  for (l=1; l < in_lock_count; l++) {
137
0
    in_locks[l].offset  = BVAL(lock_buffer, 0x00);
138
0
    in_locks[l].length  = BVAL(lock_buffer, 0x08);
139
0
    in_locks[l].flags = IVAL(lock_buffer, 0x10);
140
    /* 0x14 - 4 reserved bytes */
141
142
0
    lock_buffer += 0x18;
143
0
  }
144
145
0
  in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
146
0
  if (in_fsp == NULL) {
147
0
    return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
148
0
  }
149
150
0
  subreq = smbd_smb2_lock_send(req, req->sconn->ev_ctx,
151
0
             req, in_fsp,
152
0
             in_lock_sequence,
153
0
             in_lock_count,
154
0
             in_locks);
155
0
  if (subreq == NULL) {
156
0
    return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
157
0
  }
158
0
  tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
159
160
0
  return smbd_smb2_request_pending_queue(req, subreq, 500);
161
0
}
162
163
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
164
0
{
165
0
  struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
166
0
          struct smbd_smb2_request);
167
0
  DATA_BLOB outbody;
168
0
  NTSTATUS status;
169
0
  NTSTATUS error; /* transport error */
170
171
0
  status = smbd_smb2_lock_recv(subreq);
172
0
  TALLOC_FREE(subreq);
173
0
  if (!NT_STATUS_IS_OK(status)) {
174
0
    error = smbd_smb2_request_error(smb2req, status);
175
0
    if (!NT_STATUS_IS_OK(error)) {
176
0
      smbd_server_connection_terminate(smb2req->xconn,
177
0
               nt_errstr(error));
178
0
      return;
179
0
    }
180
0
    return;
181
0
  }
182
183
0
  outbody = smbd_smb2_generate_outbody(smb2req, 0x04);
184
0
  if (outbody.data == NULL) {
185
0
    error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
186
0
    if (!NT_STATUS_IS_OK(error)) {
187
0
      smbd_server_connection_terminate(smb2req->xconn,
188
0
               nt_errstr(error));
189
0
      return;
190
0
    }
191
0
    return;
192
0
  }
193
194
0
  SSVAL(outbody.data, 0x00, 0x04); /* struct size */
195
0
  SSVAL(outbody.data, 0x02, 0);    /* reserved */
196
197
0
  error = smbd_smb2_request_done(smb2req, outbody, NULL);
198
0
  if (!NT_STATUS_IS_OK(error)) {
199
0
    smbd_server_connection_terminate(smb2req->xconn,
200
0
             nt_errstr(error));
201
0
    return;
202
0
  }
203
0
}
204
205
static void smbd_smb2_lock_cleanup(struct tevent_req *req,
206
           enum tevent_req_state req_state);
207
static void smbd_smb2_lock_try(struct tevent_req *req);
208
static void smbd_smb2_lock_retry(struct tevent_req *subreq);
209
static bool smbd_smb2_lock_cancel(struct tevent_req *req);
210
211
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
212
             struct tevent_context *ev,
213
             struct smbd_smb2_request *smb2req,
214
             struct files_struct *fsp,
215
             uint32_t in_lock_sequence,
216
             uint16_t in_lock_count,
217
             struct smbd_smb2_lock_element *in_locks)
218
0
{
219
0
  struct tevent_req *req;
220
0
  struct smbd_smb2_lock_state *state;
221
0
  bool isunlock = false;
222
0
  uint16_t i;
223
0
  struct smbd_lock_element *locks;
224
0
  NTSTATUS status;
225
0
  bool check_lock_sequence = false;
226
0
  uint32_t lock_sequence_bucket = 0;
227
228
0
  req = tevent_req_create(mem_ctx, &state,
229
0
      struct smbd_smb2_lock_state);
230
0
  if (req == NULL) {
231
0
    return NULL;
232
0
  }
233
0
  state->ev = ev;
234
0
  state->fsp = fsp;
235
0
  state->smb2req = smb2req;
236
0
  smb2req->subreq = req; /* So we can find this when going async. */
237
238
0
  tevent_req_set_cleanup_fn(req, smbd_smb2_lock_cleanup);
239
240
0
  state->smb1req = smbd_smb2_fake_smb_request(smb2req, fsp);
241
0
  if (tevent_req_nomem(state->smb1req, req)) {
242
0
    return tevent_req_post(req, ev);
243
0
  }
244
245
0
  DEBUG(10,("smbd_smb2_lock_send: %s - %s\n",
246
0
      fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));
247
248
  /*
249
   * Windows sets check_lock_sequence = true
250
   * only for resilient and persistent handles.
251
   *
252
   * [MS-SMB2] 3.3.5.14 Receiving an SMB2 LOCK Request
253
   *
254
   *  ... if Open.IsResilient or Open.IsDurable or Open.IsPersistent is
255
   *  TRUE or if Connection.Dialect belongs to the SMB 3.x dialect family
256
   *  and Connection.ServerCapabilities includes
257
   *  SMB2_GLOBAL_CAP_MULTI_CHANNEL bit, the server SHOULD<314>
258
   *  perform lock sequence verification ...
259
260
   *  <314> Section 3.3.5.14: Windows 7 and Windows Server 2008 R2 perform
261
   *  lock sequence verification only when Open.IsResilient is TRUE.
262
   *  Windows 8 through Windows 10 v1909 and Windows Server 2012 through
263
   *  Windows Server v1909 perform lock sequence verification only when
264
   *  Open.IsResilient or Open.IsPersistent is TRUE.
265
   *
266
   * Note <314> also applies to all versions (at least) up to
267
   * Windows Server v2004.
268
   *
269
   * Hopefully this will be fixed in future Windows versions and they
270
   * will avoid Note <314>.
271
   *
272
   * We implement what the specification says by default, but
273
   * allow "smb2 disable lock sequence checking = yes" to
274
   * behave like Windows again.
275
   *
276
   * Note: that we already check the dialect before setting
277
   * SMB2_CAP_MULTI_CHANNEL in smb2_negprot.c
278
   */
279
0
  if (smb2req->xconn->smb2.server.capabilities & SMB2_CAP_MULTI_CHANNEL) {
280
0
    check_lock_sequence = true;
281
0
  }
282
0
  if (fsp->op->global->durable) {
283
0
    check_lock_sequence = true;
284
0
  }
285
286
0
  if (check_lock_sequence) {
287
0
    bool disable_lock_sequence_checking =
288
0
      lp_smb2_disable_lock_sequence_checking();
289
290
0
    if (disable_lock_sequence_checking) {
291
0
      check_lock_sequence = false;
292
0
    }
293
0
  }
294
295
0
  if (check_lock_sequence) {
296
0
    state->lock_sequence_value = in_lock_sequence & 0xF;
297
0
    lock_sequence_bucket = in_lock_sequence >> 4;
298
0
  }
299
0
  if ((lock_sequence_bucket > 0) &&
300
0
      (lock_sequence_bucket <= sizeof(fsp->op->global->lock_sequence_array)))
301
0
  {
302
0
    uint32_t idx = lock_sequence_bucket - 1;
303
0
    uint8_t *array = fsp->op->global->lock_sequence_array;
304
305
0
    state->lock_sequence_element = &array[idx];
306
0
  }
307
308
0
  if (state->lock_sequence_element != NULL) {
309
    /*
310
     * The incoming 'state->lock_sequence_value' is masked with 0xF.
311
     *
312
     * Note per default '*state->lock_sequence_element'
313
     * is invalid, a value of 0xFF that can never match on
314
     * incoming value.
315
     */
316
0
    if (*state->lock_sequence_element == state->lock_sequence_value)
317
0
    {
318
0
      DBG_INFO("replayed smb2 lock request detected: "
319
0
         "file %s, value %u, bucket %u\n",
320
0
         fsp_str_dbg(fsp),
321
0
         (unsigned)state->lock_sequence_value,
322
0
         (unsigned)lock_sequence_bucket);
323
0
      tevent_req_done(req);
324
0
      return tevent_req_post(req, ev);
325
0
    }
326
    /*
327
     * If it's not a replay, mark the element as
328
     * invalid again.
329
     */
330
0
    *state->lock_sequence_element = 0xFF;
331
0
  }
332
333
0
  locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
334
0
  if (locks == NULL) {
335
0
    tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
336
0
    return tevent_req_post(req, ev);
337
0
  }
338
339
0
  switch (in_locks[0].flags) {
340
0
  case SMB2_LOCK_FLAG_SHARED:
341
0
  case SMB2_LOCK_FLAG_EXCLUSIVE:
342
0
    if (in_lock_count > 1) {
343
0
      tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
344
0
      return tevent_req_post(req, ev);
345
0
    }
346
0
    state->blocking = true;
347
0
    break;
348
349
0
  case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
350
0
  case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
351
0
    break;
352
353
0
  case SMB2_LOCK_FLAG_UNLOCK:
354
    /* only the first lock gives the UNLOCK bit - see
355
       MS-SMB2 3.3.5.14 */
356
0
    isunlock = true;
357
0
    break;
358
359
0
  default:
360
0
    tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
361
0
    return tevent_req_post(req, ev);
362
0
  }
363
364
0
  if (!isunlock && (in_lock_count > 1)) {
365
366
    /*
367
     * 3.3.5.14.2 says we SHOULD fail with INVALID_PARAMETER if we
368
     * have more than one lock and one of those is blocking.
369
     */
370
371
0
    for (i=0; i<in_lock_count; i++) {
372
0
      uint32_t flags = in_locks[i].flags;
373
374
0
      if ((flags & SMB2_LOCK_FLAG_FAIL_IMMEDIATELY) == 0) {
375
0
        tevent_req_nterror(
376
0
          req, NT_STATUS_INVALID_PARAMETER);
377
0
        return tevent_req_post(req, ev);
378
0
      }
379
0
    }
380
0
  }
381
382
0
  for (i=0; i<in_lock_count; i++) {
383
0
    bool invalid = false;
384
0
    bool posix_handle = fsp->fsp_name->flags &
385
0
      SMB_FILENAME_POSIX_PATH;
386
387
    /*
388
     * For POSIX clients struct files_struct.fsp_flags.posix_open
389
     * and struct smb_filename.flags SMB_FILENAME_POSIX_PATH will
390
     * always be set to the same value.
391
     *
392
     * For macOS clients vfs_fruit with fruit:posix_open=yes, we
393
     * deliberately set both flags to fsp_flags.posix_open=true
394
     * while SMB_FILENAME_POSIX_PATH will not be set.
395
     *
396
     * By deliberately checking the fsp_name flag here instead of
397
     * the fsp flag, Byterange Lock processing uses Windows
398
     * behaviour for macOS clients which is what we want.
399
     */
400
0
    switch (in_locks[i].flags) {
401
0
    case SMB2_LOCK_FLAG_SHARED:
402
0
    case SMB2_LOCK_FLAG_EXCLUSIVE:
403
0
      if (isunlock) {
404
0
        invalid = true;
405
0
        break;
406
0
      }
407
0
      break;
408
409
0
    case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
410
0
    case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
411
0
      if (isunlock) {
412
0
        invalid = true;
413
0
      }
414
0
      break;
415
416
0
    case SMB2_LOCK_FLAG_UNLOCK:
417
0
      if (!isunlock) {
418
0
        tevent_req_nterror(req,
419
0
               NT_STATUS_INVALID_PARAMETER);
420
0
        return tevent_req_post(req, ev);
421
0
      }
422
0
      break;
423
424
0
    default:
425
0
      if (isunlock) {
426
        /*
427
         * If the first element was a UNLOCK
428
         * we need to defer the error response
429
         * to the backend, because we need to process
430
         * all unlock elements before
431
         */
432
0
        invalid = true;
433
0
        break;
434
0
      }
435
0
      tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
436
0
      return tevent_req_post(req, ev);
437
0
    }
438
439
0
    locks[i].req_guid = smbd_request_guid(smb2req->smb1req, i);
440
0
    locks[i].smblctx = fsp->op->global->open_persistent_id;
441
0
    locks[i].offset = in_locks[i].offset;
442
0
    locks[i].count  = in_locks[i].length;
443
444
0
    if (posix_handle) {
445
0
      locks[i].lock_flav = POSIX_LOCK;
446
0
    } else {
447
0
      locks[i].lock_flav = WINDOWS_LOCK;
448
0
    }
449
450
0
    if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
451
0
      if (posix_handle && fsp->fsp_flags.can_write == false) {
452
        /*
453
         * Can't get a write lock on a posix
454
         * read-only handle.
455
         */
456
0
        DBG_INFO("POSIX write lock requested "
457
0
          "on read-only handle for file %s\n",
458
0
          fsp_str_dbg(fsp));
459
0
        tevent_req_nterror(req,
460
0
          NT_STATUS_INVALID_HANDLE);
461
0
        return tevent_req_post(req, ev);
462
0
      }
463
0
      locks[i].brltype = WRITE_LOCK;
464
0
    } else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
465
0
      locks[i].brltype = READ_LOCK;
466
0
    } else if (invalid) {
467
      /*
468
       * this is an invalid UNLOCK element
469
       * and the backend needs to test for
470
       * brltype != UNLOCK_LOCK and return
471
       * NT_STATUS_INVALID_PARAMETER
472
       */
473
0
      locks[i].brltype = READ_LOCK;
474
0
    } else {
475
0
      locks[i].brltype = UNLOCK_LOCK;
476
0
    }
477
478
0
    DBG_DEBUG("index %"PRIu16" offset=%"PRIu64", count=%"PRIu64", "
479
0
        "smblctx = %"PRIu64" type %d\n",
480
0
        i,
481
0
        locks[i].offset,
482
0
        locks[i].count,
483
0
        locks[i].smblctx,
484
0
        (int)locks[i].brltype);
485
0
  }
486
487
0
  state->locks = locks;
488
0
  state->lock_count = in_lock_count;
489
490
0
  if (isunlock) {
491
0
    status = smbd_do_unlocking(
492
0
      state->smb1req, fsp, in_lock_count, locks);
493
494
0
    if (tevent_req_nterror(req, status)) {
495
0
      return tevent_req_post(req, ev);
496
0
    }
497
0
    tevent_req_done(req);
498
0
    return tevent_req_post(req, ev);
499
0
  }
500
501
0
  smbd_smb2_lock_try(req);
502
0
  if (!tevent_req_is_in_progress(req)) {
503
0
    return tevent_req_post(req, ev);
504
0
  }
505
506
0
  tevent_req_defer_callback(req, smb2req->sconn->ev_ctx);
507
0
  aio_add_req_to_fsp(state->fsp, req);
508
0
  tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);
509
510
0
  return req;
511
0
}
512
513
static void smbd_smb2_lock_cleanup(struct tevent_req *req,
514
           enum tevent_req_state req_state)
515
0
{
516
0
  struct smbd_smb2_lock_state *state = tevent_req_data(
517
0
    req, struct smbd_smb2_lock_state);
518
519
0
  if (req_state != TEVENT_REQ_DONE) {
520
0
    return;
521
0
  }
522
523
0
  if (state->lock_sequence_element != NULL) {
524
    /*
525
     * On success we remember the given/incoming
526
     * value (which was masked with 0xF).
527
     */
528
0
    *state->lock_sequence_element = state->lock_sequence_value;
529
0
  }
530
0
}
531
532
static void smbd_smb2_lock_update_retry_msecs(
533
  struct smbd_smb2_lock_state *state)
534
0
{
535
  /*
536
   * The default lp_lock_spin_time() is 200ms,
537
   * we just use half of it to trigger the first retry.
538
   *
539
   * v_min is in the range of 0.001 to 10 secs
540
   * (0.1 secs by default)
541
   *
542
   * v_max is in the range of 0.01 to 100 secs
543
   * (1.0 secs by default)
544
   *
545
   * The typical steps are:
546
   * 0.1, 0.2, 0.3, 0.4, ... 1.0
547
   */
548
0
  uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
549
0
  uint32_t v_max = 10 * v_min;
550
551
0
  if (state->retry_msecs >= v_max) {
552
0
    state->retry_msecs = v_max;
553
0
    return;
554
0
  }
555
556
0
  state->retry_msecs += v_min;
557
0
}
558
559
static void smbd_smb2_lock_update_polling_msecs(
560
  struct smbd_smb2_lock_state *state)
561
0
{
562
  /*
563
   * The default lp_lock_spin_time() is 200ms.
564
   *
565
   * v_min is in the range of 0.002 to 20 secs
566
   * (0.2 secs by default)
567
   *
568
   * v_max is in the range of 0.02 to 200 secs
569
   * (2.0 secs by default)
570
   *
571
   * The typical steps are:
572
   * 0.2, 0.4, 0.6, 0.8, ... 2.0
573
   */
574
0
  uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
575
0
  uint32_t v_max = 10 * v_min;
576
577
0
  if (state->polling_msecs >= v_max) {
578
0
    state->polling_msecs = v_max;
579
0
    return;
580
0
  }
581
582
0
  state->polling_msecs += v_min;
583
0
}
584
585
static void smbd_do_locks_try_fn(struct share_mode_lock *lck,
586
         struct byte_range_lock *br_lck,
587
         void *private_data)
588
0
{
589
0
  struct tevent_req *req = talloc_get_type_abort(
590
0
    private_data, struct tevent_req);
591
0
  struct smbd_smb2_lock_state *state = tevent_req_data(
592
0
    req, struct smbd_smb2_lock_state);
593
0
  struct smbd_do_locks_state brl_state;
594
0
  struct tevent_req *subreq = NULL;
595
0
  struct timeval endtime = { 0 };
596
0
  NTSTATUS status;
597
598
  /*
599
   * The caller has checked fsp->fsp_flags.can_lock and lp_locking so
600
   * br_lck has to be there!
601
   */
602
0
  SMB_ASSERT(br_lck != NULL);
603
604
0
  brl_state = (struct smbd_do_locks_state) {
605
0
    .num_locks = state->lock_count,
606
0
    .locks = state->locks,
607
0
  };
608
609
0
  status = smbd_do_locks_try(br_lck, &brl_state);
610
0
  if (NT_STATUS_IS_OK(status)) {
611
0
    tevent_req_done(req);
612
0
    return;
613
0
  }
614
0
  if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
615
    /*
616
     * We got NT_STATUS_RETRY,
617
     * we reset polling_msecs so that
618
     * the retries based on LOCK_NOT_GRANTED
619
     * will later start with small intervals again.
620
     */
621
0
    state->polling_msecs = 0;
622
623
    /*
624
     * The backend wasn't able to decide yet.
625
     * We need to wait even for non-blocking
626
     * locks.
627
     *
628
     * The backend uses blocking_smblctx == UINT64_MAX
629
     * to indicate that we should use retry timers.
630
     *
631
     * It uses blocking_smblctx == 0 to indicate
632
     * it will use share_mode_wakeup_waiters()
633
     * to wake us. Note that unrelated changes in
634
     * locking.tdb may cause retries.
635
     */
636
637
0
    if (brl_state.blocking_smblctx != UINT64_MAX) {
638
0
      SMB_ASSERT(brl_state.blocking_smblctx == 0);
639
0
      goto setup_retry;
640
0
    }
641
642
0
    smbd_smb2_lock_update_retry_msecs(state);
643
644
0
    DBG_DEBUG("Waiting for a backend decision. "
645
0
        "Retry in %"PRIu32" msecs\n",
646
0
        state->retry_msecs);
647
648
    /*
649
     * We completely ignore state->endtime here
650
   * we'll wait for a backend decision forever.
651
     * If the backend is smart enough to implement
652
     * some NT_STATUS_RETRY logic, it has to
653
     * switch to any other status after in order
654
     * to avoid waiting forever.
655
     */
656
0
    endtime = timeval_current_ofs_msec(state->retry_msecs);
657
0
    goto setup_retry;
658
0
  }
659
0
  if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
660
    /*
661
     * This is a bug and will be changed into an assert
662
     * in future version. We should only
663
     * ever get NT_STATUS_LOCK_NOT_GRANTED here!
664
     */
665
0
    static uint64_t _bug_count;
666
0
    int _level = (_bug_count++ == 0) ? DBGLVL_ERR: DBGLVL_DEBUG;
667
0
    DBG_PREFIX(_level, ("BUG: Got %s mapping to "
668
0
         "NT_STATUS_LOCK_NOT_GRANTED\n",
669
0
         nt_errstr(status)));
670
0
    status = NT_STATUS_LOCK_NOT_GRANTED;
671
0
  }
672
0
  if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
673
0
    tevent_req_nterror(req, status);
674
0
    return;
675
0
  }
676
  /*
677
   * We got LOCK_NOT_GRANTED, make sure
678
   * a following STATUS_RETRY will start
679
   * with short intervals again.
680
   */
681
0
  state->retry_msecs = 0;
682
683
0
  if (!state->blocking) {
684
0
    tevent_req_nterror(req, status);
685
0
    return;
686
0
  }
687
688
0
  if (brl_state.blocking_smblctx == UINT64_MAX) {
689
0
    smbd_smb2_lock_update_polling_msecs(state);
690
691
0
    DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
692
0
        state->polling_msecs);
693
694
0
    endtime = timeval_current_ofs_msec(state->polling_msecs);
695
0
  }
696
697
0
setup_retry:
698
0
  DBG_DEBUG("Watching share mode lock\n");
699
700
0
  subreq = share_mode_watch_send(
701
0
    state, state->ev, &state->fsp->file_id, brl_state.blocking_pid);
702
0
  if (tevent_req_nomem(subreq, req)) {
703
0
    return;
704
0
  }
705
0
  tevent_req_set_callback(subreq, smbd_smb2_lock_retry, req);
706
707
0
  if (!timeval_is_zero(&endtime)) {
708
0
    bool ok;
709
710
0
    ok = tevent_req_set_endtime(subreq,
711
0
              state->ev,
712
0
              endtime);
713
0
    if (!ok) {
714
0
      tevent_req_oom(req);
715
0
      return;
716
0
    }
717
0
  }
718
0
}
719
720
static void smbd_smb2_lock_try(struct tevent_req *req)
721
0
{
722
0
  struct smbd_smb2_lock_state *state = tevent_req_data(
723
0
    req, struct smbd_smb2_lock_state);
724
0
  NTSTATUS status;
725
726
0
  if (!state->fsp->fsp_flags.can_lock) {
727
0
    if (state->fsp->fsp_flags.is_directory) {
728
0
      tevent_req_nterror(req,
729
0
             NT_STATUS_INVALID_DEVICE_REQUEST);
730
0
      return;
731
0
    }
732
0
    tevent_req_nterror(req, NT_STATUS_INVALID_HANDLE);
733
0
    return;
734
0
  }
735
736
0
  if (!lp_locking(state->fsp->conn->params)) {
737
0
    return tevent_req_done(req);
738
0
  }
739
740
0
  status = share_mode_do_locked_brl(state->fsp,
741
0
            smbd_do_locks_try_fn,
742
0
            req);
743
0
  if (!NT_STATUS_IS_OK(status)) {
744
0
    tevent_req_nterror(req, status);
745
0
    return;
746
0
  }
747
0
}
748
749
static void smbd_smb2_lock_retry(struct tevent_req *subreq)
750
0
{
751
0
  struct tevent_req *req = tevent_req_callback_data(
752
0
    subreq, struct tevent_req);
753
0
  struct smbd_smb2_lock_state *state = tevent_req_data(
754
0
    req, struct smbd_smb2_lock_state);
755
0
  NTSTATUS status;
756
0
  bool ok;
757
758
  /*
759
   * Make sure we run as the user again
760
   */
761
0
  ok = change_to_user_and_service_by_fsp(state->fsp);
762
0
  if (!ok) {
763
0
    tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
764
0
    return;
765
0
  }
766
767
0
  status = share_mode_watch_recv(subreq, NULL, NULL);
768
0
  TALLOC_FREE(subreq);
769
0
  if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
770
    /*
771
     * This is just a trigger for a timed retry.
772
     */
773
0
    status = NT_STATUS_OK;
774
0
  }
775
0
  if (tevent_req_nterror(req, status)) {
776
0
    return;
777
0
  }
778
779
0
  smbd_smb2_lock_try(req);
780
0
}
781
782
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
783
0
{
784
0
  return tevent_req_simple_recv_ntstatus(req);
785
0
}
786
787
/****************************************************************
788
 Cancel an outstanding blocking lock request.
789
*****************************************************************/
790
791
static bool smbd_smb2_lock_cancel(struct tevent_req *req)
792
0
{
793
0
  struct smbd_smb2_request *smb2req = NULL;
794
0
  struct smbd_smb2_lock_state *state = tevent_req_data(req,
795
0
        struct smbd_smb2_lock_state);
796
0
  if (!state) {
797
0
    return false;
798
0
  }
799
800
0
  if (!state->smb2req) {
801
0
    return false;
802
0
  }
803
804
0
  smb2req = state->smb2req;
805
806
  /*
807
   * If the request is canceled because of close, logoff or tdis
808
   * the status is NT_STATUS_RANGE_NOT_LOCKED instead of
809
   * NT_STATUS_CANCELLED.
810
   */
811
0
  if (state->fsp->fsp_flags.closing ||
812
0
      !NT_STATUS_IS_OK(smb2req->session->status) ||
813
0
      !NT_STATUS_IS_OK(smb2req->tcon->status)) {
814
0
    tevent_req_nterror(req, NT_STATUS_RANGE_NOT_LOCKED);
815
0
    return true;
816
0
  }
817
818
0
  tevent_req_nterror(req, NT_STATUS_CANCELLED);
819
  return true;
820
0
}