Coverage Report

Created: 2025-10-12 06:48

/src/cpython/Objects/listobject.c
Line
Count
Source
1
/* List object implementation */
2
3
#include "Python.h"
4
#include "pycore_abstract.h"      // _PyIndex_Check()
5
#include "pycore_ceval.h"         // _PyEval_GetBuiltin()
6
#include "pycore_critical_section.h"  // _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED()
7
#include "pycore_dict.h"          // _PyDictViewObject
8
#include "pycore_freelist.h"      // _Py_FREELIST_FREE(), _Py_FREELIST_POP()
9
#include "pycore_interp.h"        // PyInterpreterState.list
10
#include "pycore_list.h"          // struct _Py_list_freelist, _PyListIterObject
11
#include "pycore_long.h"          // _PyLong_DigitCount
12
#include "pycore_modsupport.h"    // _PyArg_NoKwnames()
13
#include "pycore_object.h"        // _PyObject_GC_TRACK(), _PyDebugAllocatorStats()
14
#include "pycore_pyatomic_ft_wrappers.h"
15
#include "pycore_setobject.h"     // _PySet_NextEntry()
16
#include "pycore_stackref.h"      // _Py_TryIncrefCompareStackRef()
17
#include "pycore_tuple.h"         // _PyTuple_FromArraySteal()
18
#include "pycore_typeobject.h"    // _Py_TYPE_VERSION_LIST
19
#include <stddef.h>
20
21
/*[clinic input]
22
class list "PyListObject *" "&PyList_Type"
23
[clinic start generated code]*/
24
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=f9b222678f9f71e0]*/
25
26
#include "clinic/listobject.c.h"
27
28
_Py_DECLARE_STR(list_err, "list index out of range");
29
30
#ifdef Py_GIL_DISABLED
31
typedef struct {
32
    Py_ssize_t allocated;
33
    PyObject *ob_item[];
34
} _PyListArray;
35
36
static _PyListArray *
37
list_allocate_array(size_t capacity)
38
{
39
    if (capacity > PY_SSIZE_T_MAX/sizeof(PyObject*) - 1) {
40
        return NULL;
41
    }
42
    _PyListArray *array = PyMem_Malloc(sizeof(_PyListArray) + capacity * sizeof(PyObject *));
43
    if (array == NULL) {
44
        return NULL;
45
    }
46
    array->allocated = capacity;
47
    return array;
48
}
49
50
static Py_ssize_t
51
list_capacity(PyObject **items)
52
{
53
    _PyListArray *array = _Py_CONTAINER_OF(items, _PyListArray, ob_item);
54
    return array->allocated;
55
}
56
#endif
57
58
static void
59
free_list_items(PyObject** items, bool use_qsbr)
60
119M
{
61
#ifdef Py_GIL_DISABLED
62
    _PyListArray *array = _Py_CONTAINER_OF(items, _PyListArray, ob_item);
63
    if (use_qsbr) {
64
        size_t size = sizeof(_PyListArray) + array->allocated * sizeof(PyObject *);
65
        _PyMem_FreeDelayed(array, size);
66
    }
67
    else {
68
        PyMem_Free(array);
69
    }
70
#else
71
119M
    PyMem_Free(items);
72
119M
#endif
73
119M
}
74
75
static void
76
ensure_shared_on_resize(PyListObject *self)
77
86.2M
{
78
#ifdef Py_GIL_DISABLED
79
    // We can't use _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED here because
80
    // the `CALL_LIST_APPEND` bytecode handler may lock the list without
81
    // a critical section.
82
    assert(Py_REFCNT(self) == 1 || PyMutex_IsLocked(&_PyObject_CAST(self)->ob_mutex));
83
84
    // Ensure that the list array is freed using QSBR if we are not the
85
    // owning thread.
86
    if (!_Py_IsOwnedByCurrentThread((PyObject *)self) &&
87
        !_PyObject_GC_IS_SHARED(self))
88
    {
89
        _PyObject_GC_SET_SHARED(self);
90
    }
91
#endif
92
86.2M
}
93
94
/* Ensure ob_item has room for at least newsize elements, and set
95
 * ob_size to newsize.  If newsize > ob_size on entry, the content
96
 * of the new slots at exit is undefined heap trash; it's the caller's
97
 * responsibility to overwrite them with sane values.
98
 * The number of allocated elements may grow, shrink, or stay the same.
99
 * Failure is impossible if newsize <= self.allocated on entry, although
100
 * that partly relies on an assumption that the system realloc() never
101
 * fails when passed a number of bytes <= the number of bytes last
102
 * allocated (the C standard doesn't guarantee this, but it's hard to
103
 * imagine a realloc implementation where it wouldn't be true).
104
 * Note that self->ob_item may change, and even if newsize is less
105
 * than ob_size on entry.
106
 */
107
static int
108
list_resize(PyListObject *self, Py_ssize_t newsize)
109
102M
{
110
102M
    size_t new_allocated, target_bytes;
111
102M
    Py_ssize_t allocated = self->allocated;
112
113
    /* Bypass realloc() when a previous overallocation is large enough
114
       to accommodate the newsize.  If the newsize falls lower than half
115
       the allocated size, then proceed with the realloc() to shrink the list.
116
    */
117
102M
    if (allocated >= newsize && newsize >= (allocated >> 1)) {
118
16.4M
        assert(self->ob_item != NULL || newsize == 0);
119
16.4M
        Py_SET_SIZE(self, newsize);
120
16.4M
        return 0;
121
16.4M
    }
122
123
    /* This over-allocates proportional to the list size, making room
124
     * for additional growth.  The over-allocation is mild, but is
125
     * enough to give linear-time amortized behavior over a long
126
     * sequence of appends() in the presence of a poorly-performing
127
     * system realloc().
128
     * Add padding to make the allocated size multiple of 4.
129
     * The growth pattern is:  0, 4, 8, 16, 24, 32, 40, 52, 64, 76, ...
130
     * Note: new_allocated won't overflow because the largest possible value
131
     *       is PY_SSIZE_T_MAX * (9 / 8) + 6 which always fits in a size_t.
132
     */
133
86.2M
    new_allocated = ((size_t)newsize + (newsize >> 3) + 6) & ~(size_t)3;
134
    /* Do not overallocate if the new size is closer to overallocated size
135
     * than to the old size.
136
     */
137
86.2M
    if (newsize - Py_SIZE(self) > (Py_ssize_t)(new_allocated - newsize))
138
10.8k
        new_allocated = ((size_t)newsize + 3) & ~(size_t)3;
139
140
86.2M
    if (newsize == 0)
141
2.33k
        new_allocated = 0;
142
143
86.2M
    ensure_shared_on_resize(self);
144
145
#ifdef Py_GIL_DISABLED
146
    _PyListArray *array = list_allocate_array(new_allocated);
147
    if (array == NULL) {
148
        PyErr_NoMemory();
149
        return -1;
150
    }
151
    PyObject **old_items = self->ob_item;
152
    if (self->ob_item) {
153
        if (new_allocated < (size_t)allocated) {
154
            target_bytes = new_allocated * sizeof(PyObject*);
155
        }
156
        else {
157
            target_bytes = allocated * sizeof(PyObject*);
158
        }
159
        memcpy(array->ob_item, self->ob_item, target_bytes);
160
    }
161
    if (new_allocated > (size_t)allocated) {
162
        memset(array->ob_item + allocated, 0, sizeof(PyObject *) * (new_allocated - allocated));
163
    }
164
     _Py_atomic_store_ptr_release(&self->ob_item, &array->ob_item);
165
    self->allocated = new_allocated;
166
    Py_SET_SIZE(self, newsize);
167
    if (old_items != NULL) {
168
        free_list_items(old_items, _PyObject_GC_IS_SHARED(self));
169
    }
170
#else
171
86.2M
    PyObject **items;
172
86.2M
    if (new_allocated <= (size_t)PY_SSIZE_T_MAX / sizeof(PyObject *)) {
173
86.2M
        target_bytes = new_allocated * sizeof(PyObject *);
174
86.2M
        items = (PyObject **)PyMem_Realloc(self->ob_item, target_bytes);
175
86.2M
    }
176
0
    else {
177
        // integer overflow
178
0
        items = NULL;
179
0
    }
180
86.2M
    if (items == NULL) {
181
0
        PyErr_NoMemory();
182
0
        return -1;
183
0
    }
184
86.2M
    self->ob_item = items;
185
86.2M
    Py_SET_SIZE(self, newsize);
186
86.2M
    self->allocated = new_allocated;
187
86.2M
#endif
188
86.2M
    return 0;
189
86.2M
}
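
The over-allocation arithmetic above can be reproduced outside CPython. Below is a minimal standalone sketch (plain C, no CPython headers; the helper name grow() is invented for the demo) that simulates appending one item at a time and prints the documented capacity sequence 0, 4, 8, 16, 24, 32, 40, 52, 64, 76, ...

/* Standalone sketch of the list_resize() over-allocation arithmetic:
 * simulate repeated appends to an initially empty list and print every
 * new capacity. */
#include <stdio.h>
#include <stddef.h>

static size_t grow(size_t newsize)
{
    /* Same formula as list_resize(): roughly 1/8 extra plus a small
     * constant, rounded down to a multiple of 4. */
    return (newsize + (newsize >> 3) + 6) & ~(size_t)3;
}

int main(void)
{
    size_t allocated = 0;
    size_t size = 0;

    printf("%zu", allocated);
    for (int i = 0; i < 76; i++) {
        size++;                      /* one append */
        if (size > allocated) {      /* no spare slot left: "realloc" */
            allocated = grow(size);
            printf(" %zu", allocated);
        }
    }
    printf("\n");                    /* 0 4 8 16 24 32 40 52 64 76 */
    return 0;
}

Because the extra room scales with the list size, a long run of appends triggers only O(log n) reallocations worth of copying per element on average, which is the amortized linear-time behavior the comment promises.
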
190
191
static int
192
list_preallocate_exact(PyListObject *self, Py_ssize_t size)
193
6.82M
{
194
6.82M
    PyObject **items;
195
6.82M
    assert(self->ob_item == NULL);
196
6.82M
    assert(size > 0);
197
198
    /* Since the Python memory allocator has granularity of 16 bytes on 64-bit
199
     * platforms (8 on 32-bit), there is no benefit of allocating space for
200
     * the odd number of items, and there is no drawback of rounding the
201
     * allocated size up to the nearest even number.
202
     */
203
6.82M
    size = (size + 1) & ~(size_t)1;
204
#ifdef Py_GIL_DISABLED
205
    _PyListArray *array = list_allocate_array(size);
206
    if (array == NULL) {
207
        PyErr_NoMemory();
208
        return -1;
209
    }
210
    items = array->ob_item;
211
    memset(items, 0, size * sizeof(PyObject *));
212
#else
213
6.82M
    items = PyMem_New(PyObject*, size);
214
6.82M
    if (items == NULL) {
215
0
        PyErr_NoMemory();
216
0
        return -1;
217
0
    }
218
6.82M
#endif
219
6.82M
    FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item, items);
220
6.82M
    self->allocated = size;
221
6.82M
    return 0;
222
6.82M
}
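
A small standalone sketch of the round-up-to-even step in list_preallocate_exact() (plain C, illustrative only): odd requests cost the same as the next even one at the allocator's granularity, so they are rounded up.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    for (size_t size = 1; size <= 8; size++) {
        size_t rounded = (size + 1) & ~(size_t)1;   /* round up to even */
        printf("requested %zu -> allocated %zu\n", size, rounded);
    }
    return 0;
}
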
223
224
/* Print summary info about the state of the optimized allocator */
225
void
226
_PyList_DebugMallocStats(FILE *out)
227
0
{
228
0
    _PyDebugAllocatorStats(out,
229
0
                           "free PyListObject",
230
0
                            _Py_FREELIST_SIZE(lists),
231
0
                           sizeof(PyListObject));
232
0
}
233
234
PyObject *
235
PyList_New(Py_ssize_t size)
236
265M
{
237
265M
    if (size < 0) {
238
0
        PyErr_BadInternalCall();
239
0
        return NULL;
240
0
    }
241
242
265M
    PyListObject *op = _Py_FREELIST_POP(PyListObject, lists);
243
265M
    if (op == NULL) {
244
33.0M
        op = PyObject_GC_New(PyListObject, &PyList_Type);
245
33.0M
        if (op == NULL) {
246
0
            return NULL;
247
0
        }
248
33.0M
    }
249
265M
    if (size <= 0) {
250
223M
        op->ob_item = NULL;
251
223M
    }
252
42.1M
    else {
253
#ifdef Py_GIL_DISABLED
254
        _PyListArray *array = list_allocate_array(size);
255
        if (array == NULL) {
256
            Py_DECREF(op);
257
            return PyErr_NoMemory();
258
        }
259
        memset(&array->ob_item, 0, size * sizeof(PyObject *));
260
        op->ob_item = array->ob_item;
261
#else
262
42.1M
        op->ob_item = (PyObject **) PyMem_Calloc(size, sizeof(PyObject *));
263
42.1M
#endif
264
42.1M
        if (op->ob_item == NULL) {
265
0
            Py_DECREF(op);
266
0
            return PyErr_NoMemory();
267
0
        }
268
42.1M
    }
269
265M
    Py_SET_SIZE(op, size);
270
265M
    op->allocated = size;
271
265M
    _PyObject_GC_TRACK(op);
272
265M
    return (PyObject *) op;
273
265M
}
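
A hedged usage sketch of PyList_New() from the embedding side (not part of the instrumented source; assumes a build against the CPython headers with the usual python3-config --embed flags). It shows the contract implied above: for size > 0 the item slots start out NULL, so every slot must be filled, here with PyList_SET_ITEM(), which steals the item reference, before the list is handed to any other API.

#include <Python.h>

int main(void)
{
    Py_Initialize();

    PyObject *list = PyList_New(3);
    if (list == NULL) {
        PyErr_Print();
        return 1;
    }
    for (Py_ssize_t i = 0; i < 3; i++) {
        PyObject *item = PyLong_FromLong((long)(i * 10));
        if (item == NULL) {
            Py_DECREF(list);         /* NULL slots are fine for dealloc */
            PyErr_Print();
            return 1;
        }
        PyList_SET_ITEM(list, i, item);   /* steals the reference to item */
    }

    PyObject_Print(list, stdout, 0);      /* prints [0, 10, 20] */
    printf("\n");

    Py_DECREF(list);
    return Py_FinalizeEx() < 0 ? 1 : 0;
}
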
274
275
static PyObject *
276
list_new_prealloc(Py_ssize_t size)
277
15.6M
{
278
15.6M
    assert(size > 0);
279
15.6M
    PyListObject *op = (PyListObject *) PyList_New(0);
280
15.6M
    if (op == NULL) {
281
0
        return NULL;
282
0
    }
283
15.6M
    assert(op->ob_item == NULL);
284
#ifdef Py_GIL_DISABLED
285
    _PyListArray *array = list_allocate_array(size);
286
    if (array == NULL) {
287
        Py_DECREF(op);
288
        return PyErr_NoMemory();
289
    }
290
    op->ob_item = array->ob_item;
291
#else
292
15.6M
    op->ob_item = PyMem_New(PyObject *, size);
293
15.6M
    if (op->ob_item == NULL) {
294
0
        Py_DECREF(op);
295
0
        return PyErr_NoMemory();
296
0
    }
297
15.6M
#endif
298
15.6M
    op->allocated = size;
299
15.6M
    return (PyObject *) op;
300
15.6M
}
301
302
Py_ssize_t
303
PyList_Size(PyObject *op)
304
91.1k
{
305
91.1k
    if (!PyList_Check(op)) {
306
0
        PyErr_BadInternalCall();
307
0
        return -1;
308
0
    }
309
91.1k
    else {
310
91.1k
        return PyList_GET_SIZE(op);
311
91.1k
    }
312
91.1k
}
313
314
static inline int
315
valid_index(Py_ssize_t i, Py_ssize_t limit)
316
210M
{
317
    /* The cast to size_t lets us use just a single comparison
318
       to check whether i is in the range: 0 <= i < limit.
319
320
       See:  Section 14.2 "Bounds Checking" in the Agner Fog
321
       optimization manual found at:
322
       https://www.agner.org/optimize/optimizing_cpp.pdf
323
    */
324
210M
    return (size_t) i < (size_t) limit;
325
210M
}
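
The single-comparison bounds check can be demonstrated in isolation. A standalone sketch (plain C; ptrdiff_t stands in for Py_ssize_t) showing that the unsigned cast maps negative indices to huge values, so one comparison rejects both i < 0 and i >= limit:

#include <stdio.h>
#include <stddef.h>

static int valid_index(ptrdiff_t i, ptrdiff_t limit)
{
    /* Same trick as above: one unsigned comparison covers 0 <= i < limit. */
    return (size_t)i < (size_t)limit;
}

int main(void)
{
    ptrdiff_t limit = 5;
    ptrdiff_t tests[] = { -3, -1, 0, 4, 5, 100 };

    for (size_t k = 0; k < sizeof(tests) / sizeof(tests[0]); k++) {
        printf("valid_index(%td, %td) = %d\n",
               tests[k], limit, valid_index(tests[k], limit));
    }
    return 0;   /* prints 0 0 1 1 0 0 */
}
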
326
327
#ifdef Py_GIL_DISABLED
328
329
static PyObject *
330
list_item_impl(PyListObject *self, Py_ssize_t idx)
331
{
332
    PyObject *item = NULL;
333
    Py_BEGIN_CRITICAL_SECTION(self);
334
    if (!_PyObject_GC_IS_SHARED(self)) {
335
        _PyObject_GC_SET_SHARED(self);
336
    }
337
    Py_ssize_t size = Py_SIZE(self);
338
    if (!valid_index(idx, size)) {
339
        goto exit;
340
    }
341
    item = _Py_NewRefWithLock(self->ob_item[idx]);
342
exit:
343
    Py_END_CRITICAL_SECTION();
344
    return item;
345
}
346
347
static inline PyObject*
348
list_get_item_ref(PyListObject *op, Py_ssize_t i)
349
{
350
    if (!_Py_IsOwnedByCurrentThread((PyObject *)op) && !_PyObject_GC_IS_SHARED(op)) {
351
        return list_item_impl(op, i);
352
    }
353
    // Need atomic operation for the getting size.
354
    Py_ssize_t size = PyList_GET_SIZE(op);
355
    if (!valid_index(i, size)) {
356
        return NULL;
357
    }
358
    PyObject **ob_item = _Py_atomic_load_ptr(&op->ob_item);
359
    if (ob_item == NULL) {
360
        return NULL;
361
    }
362
    Py_ssize_t cap = list_capacity(ob_item);
363
    assert(cap != -1);
364
    if (!valid_index(i, cap)) {
365
        return NULL;
366
    }
367
    PyObject *item = _Py_TryXGetRef(&ob_item[i]);
368
    if (item == NULL) {
369
        return list_item_impl(op, i);
370
    }
371
    return item;
372
}
373
#else
374
static inline PyObject*
375
list_get_item_ref(PyListObject *op, Py_ssize_t i)
376
163M
{
377
163M
    if (!valid_index(i, Py_SIZE(op))) {
378
32.8M
        return NULL;
379
32.8M
    }
380
130M
    return Py_NewRef(PyList_GET_ITEM(op, i));
381
163M
}
382
#endif
383
384
PyObject *
385
PyList_GetItem(PyObject *op, Py_ssize_t i)
386
648
{
387
648
    if (!PyList_Check(op)) {
388
0
        PyErr_BadInternalCall();
389
0
        return NULL;
390
0
    }
391
648
    if (!valid_index(i, Py_SIZE(op))) {
392
0
        _Py_DECLARE_STR(list_err, "list index out of range");
393
0
        PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
394
0
        return NULL;
395
0
    }
396
648
    return ((PyListObject *)op) -> ob_item[i];
397
648
}
398
399
PyObject *
400
PyList_GetItemRef(PyObject *op, Py_ssize_t i)
401
84.1k
{
402
84.1k
    if (!PyList_Check(op)) {
403
0
        PyErr_SetString(PyExc_TypeError, "expected a list");
404
0
        return NULL;
405
0
    }
406
84.1k
    PyObject *item = list_get_item_ref((PyListObject *)op, i);
407
84.1k
    if (item == NULL) {
408
0
        _Py_DECLARE_STR(list_err, "list index out of range");
409
0
        PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
410
0
        return NULL;
411
0
    }
412
84.1k
    return item;
413
84.1k
}
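
A hedged usage sketch contrasting the two getters above (not from the instrumented source; assumes headers recent enough to provide PyList_GetItemRef(), the newer strong-reference variant): PyList_GetItem() returns a borrowed reference, while PyList_GetItemRef() returns a new reference that the caller must release.

#include <Python.h>

static int demo(void)
{
    PyObject *list = Py_BuildValue("[iii]", 1, 2, 3);
    if (list == NULL) {
        return -1;
    }

    /* Borrowed reference: do not Py_DECREF it, and do not keep it past
     * any call that might mutate or free the list. */
    PyObject *borrowed = PyList_GetItem(list, 0);
    if (borrowed == NULL) {
        Py_DECREF(list);
        return -1;
    }

    /* Strong reference: safe to hold onto, must be released. */
    PyObject *strong = PyList_GetItemRef(list, 1);
    if (strong == NULL) {
        Py_DECREF(list);
        return -1;
    }
    printf("item 1 = %ld\n", PyLong_AsLong(strong));
    Py_DECREF(strong);

    Py_DECREF(list);
    return 0;
}

int main(void)
{
    Py_Initialize();
    int rc = demo();
    if (rc < 0) {
        PyErr_Print();
    }
    return (Py_FinalizeEx() < 0 || rc < 0) ? 1 : 0;
}
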
414
415
PyObject *
416
_PyList_GetItemRef(PyListObject *list, Py_ssize_t i)
417
1.53k
{
418
1.53k
    return list_get_item_ref(list, i);
419
1.53k
}
420
421
#ifdef Py_GIL_DISABLED
422
int
423
_PyList_GetItemRefNoLock(PyListObject *list, Py_ssize_t i, _PyStackRef *result)
424
{
425
    assert(_Py_IsOwnedByCurrentThread((PyObject *)list) ||
426
           _PyObject_GC_IS_SHARED(list));
427
    if (!valid_index(i, PyList_GET_SIZE(list))) {
428
        return 0;
429
    }
430
    PyObject **ob_item = _Py_atomic_load_ptr(&list->ob_item);
431
    if (ob_item == NULL) {
432
        return 0;
433
    }
434
    Py_ssize_t cap = list_capacity(ob_item);
435
    assert(cap != -1);
436
    if (!valid_index(i, cap)) {
437
        return 0;
438
    }
439
    PyObject *obj = _Py_atomic_load_ptr(&ob_item[i]);
440
    if (obj == NULL || !_Py_TryIncrefCompareStackRef(&ob_item[i], obj, result)) {
441
        return -1;
442
    }
443
    return 1;
444
}
445
#endif
446
447
int
448
PyList_SetItem(PyObject *op, Py_ssize_t i,
449
               PyObject *newitem)
450
22.8k
{
451
22.8k
    if (!PyList_Check(op)) {
452
0
        Py_XDECREF(newitem);
453
0
        PyErr_BadInternalCall();
454
0
        return -1;
455
0
    }
456
22.8k
    int ret;
457
22.8k
    PyListObject *self = ((PyListObject *)op);
458
22.8k
    Py_BEGIN_CRITICAL_SECTION(self);
459
22.8k
    if (!valid_index(i, Py_SIZE(self))) {
460
0
        Py_XDECREF(newitem);
461
0
        PyErr_SetString(PyExc_IndexError,
462
0
                        "list assignment index out of range");
463
0
        ret = -1;
464
0
        goto end;
465
0
    }
466
22.8k
    PyObject *tmp = self->ob_item[i];
467
22.8k
    FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item[i], newitem);
468
22.8k
    Py_XDECREF(tmp);
469
22.8k
    ret = 0;
470
22.8k
end:;
471
22.8k
    Py_END_CRITICAL_SECTION();
472
22.8k
    return ret;
473
22.8k
}
474
475
static int
476
ins1(PyListObject *self, Py_ssize_t where, PyObject *v)
477
76
{
478
76
    Py_ssize_t i, n = Py_SIZE(self);
479
76
    PyObject **items;
480
76
    if (v == NULL) {
481
0
        PyErr_BadInternalCall();
482
0
        return -1;
483
0
    }
484
485
76
    assert((size_t)n + 1 < PY_SSIZE_T_MAX);
486
76
    if (list_resize(self, n+1) < 0)
487
0
        return -1;
488
489
76
    if (where < 0) {
490
0
        where += n;
491
0
        if (where < 0)
492
0
            where = 0;
493
0
    }
494
76
    if (where > n)
495
0
        where = n;
496
76
    items = self->ob_item;
497
462
    for (i = n; --i >= where; )
498
386
        FT_ATOMIC_STORE_PTR_RELAXED(items[i+1], items[i]);
499
76
    FT_ATOMIC_STORE_PTR_RELEASE(items[where], Py_NewRef(v));
500
76
    return 0;
501
76
}
502
503
int
504
PyList_Insert(PyObject *op, Py_ssize_t where, PyObject *newitem)
505
16
{
506
16
    if (!PyList_Check(op)) {
507
0
        PyErr_BadInternalCall();
508
0
        return -1;
509
0
    }
510
16
    PyListObject *self = (PyListObject *)op;
511
16
    int err;
512
16
    Py_BEGIN_CRITICAL_SECTION(self);
513
16
    err = ins1(self, where, newitem);
514
16
    Py_END_CRITICAL_SECTION();
515
16
    return err;
516
16
}
517
518
/* internal, used by _PyList_AppendTakeRef */
519
int
520
_PyList_AppendTakeRefListResize(PyListObject *self, PyObject *newitem)
521
63.9M
{
522
63.9M
    Py_ssize_t len = Py_SIZE(self);
523
63.9M
    assert(self->allocated == -1 || self->allocated == len);
524
63.9M
    if (list_resize(self, len + 1) < 0) {
525
0
        Py_DECREF(newitem);
526
0
        return -1;
527
0
    }
528
63.9M
    FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item[len], newitem);
529
63.9M
    return 0;
530
63.9M
}
531
532
int
533
PyList_Append(PyObject *op, PyObject *newitem)
534
176M
{
535
176M
    if (PyList_Check(op) && (newitem != NULL)) {
536
176M
        int ret;
537
176M
        Py_BEGIN_CRITICAL_SECTION(op);
538
176M
        ret = _PyList_AppendTakeRef((PyListObject *)op, Py_NewRef(newitem));
539
176M
        Py_END_CRITICAL_SECTION();
540
176M
        return ret;
541
176M
    }
542
0
    PyErr_BadInternalCall();
543
0
    return -1;
544
176M
}
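
A hedged usage sketch of PyList_Append() from the embedding side (not from the instrumented source). As the listing shows, the list takes its own reference with Py_NewRef(), so the caller's reference to the item is not stolen and must still be released:

#include <Python.h>

int main(void)
{
    Py_Initialize();

    PyObject *list = PyList_New(0);
    PyObject *item = PyUnicode_FromString("hello");
    int rc = 1;

    if (list != NULL && item != NULL) {
        if (PyList_Append(list, item) == 0) {   /* list holds its own ref */
            PyObject_Print(list, stdout, 0);    /* ['hello'] */
            printf("\n");
            rc = 0;
        }
    }
    if (rc != 0) {
        PyErr_Print();
    }
    Py_XDECREF(item);    /* caller's reference is still owned here */
    Py_XDECREF(list);
    return Py_FinalizeEx() < 0 ? 1 : rc;
}
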
545
546
/* Methods */
547
548
static void
549
list_dealloc(PyObject *self)
550
287M
{
551
287M
    PyListObject *op = (PyListObject *)self;
552
287M
    Py_ssize_t i;
553
287M
    PyObject_GC_UnTrack(op);
554
287M
    if (op->ob_item != NULL) {
555
        /* Do it backwards, for Christian Tismer.
556
           There's a simple test case where somehow this reduces
557
           thrashing when a *very* large list is created and
558
           immediately deleted. */
559
106M
        i = Py_SIZE(op);
560
1.51G
        while (--i >= 0) {
561
1.41G
            Py_XDECREF(op->ob_item[i]);
562
1.41G
        }
563
106M
        free_list_items(op->ob_item, false);
564
106M
        op->ob_item = NULL;
565
106M
    }
566
287M
    if (PyList_CheckExact(op)) {
567
275M
        _Py_FREELIST_FREE(lists, op, PyObject_GC_Del);
568
275M
    }
569
12.0M
    else {
570
12.0M
        PyObject_GC_Del(op);
571
12.0M
    }
572
287M
}
573
574
static PyObject *
575
list_repr_impl(PyListObject *v)
576
4.38M
{
577
4.38M
    int res = Py_ReprEnter((PyObject*)v);
578
4.38M
    if (res != 0) {
579
0
        return (res > 0 ? PyUnicode_FromString("[...]") : NULL);
580
0
    }
581
582
    /* "[" + "1" + ", 2" * (len - 1) + "]" */
583
4.38M
    Py_ssize_t prealloc = 1 + 1 + (2 + 1) * (Py_SIZE(v) - 1) + 1;
584
4.38M
    PyUnicodeWriter *writer = PyUnicodeWriter_Create(prealloc);
585
4.38M
    PyObject *item = NULL;
586
4.38M
    if (writer == NULL) {
587
0
        goto error;
588
0
    }
589
590
4.38M
    if (PyUnicodeWriter_WriteChar(writer, '[') < 0) {
591
0
        goto error;
592
0
    }
593
594
    /* Do repr() on each element.  Note that this may mutate the list,
595
       so must refetch the list size on each iteration. */
596
12.3M
    for (Py_ssize_t i = 0; i < Py_SIZE(v); ++i) {
597
        /* Hold a strong reference since repr(item) can mutate the list */
598
7.92M
        item = Py_NewRef(v->ob_item[i]);
599
600
7.92M
        if (i > 0) {
601
3.54M
            if (PyUnicodeWriter_WriteChar(writer, ',') < 0) {
602
0
                goto error;
603
0
            }
604
3.54M
            if (PyUnicodeWriter_WriteChar(writer, ' ') < 0) {
605
0
                goto error;
606
0
            }
607
3.54M
        }
608
609
7.92M
        if (PyUnicodeWriter_WriteRepr(writer, item) < 0) {
610
0
            goto error;
611
0
        }
612
7.92M
        Py_CLEAR(item);
613
7.92M
    }
614
615
4.38M
    if (PyUnicodeWriter_WriteChar(writer, ']') < 0) {
616
0
        goto error;
617
0
    }
618
619
4.38M
    Py_ReprLeave((PyObject *)v);
620
4.38M
    return PyUnicodeWriter_Finish(writer);
621
622
0
error:
623
0
    Py_XDECREF(item);
624
0
    PyUnicodeWriter_Discard(writer);
625
0
    Py_ReprLeave((PyObject *)v);
626
0
    return NULL;
627
4.38M
}
628
629
static PyObject *
630
list_repr(PyObject *self)
631
4.38M
{
632
4.38M
    if (PyList_GET_SIZE(self) == 0) {
633
4.58k
        return PyUnicode_FromString("[]");
634
4.58k
    }
635
4.38M
    PyListObject *v = (PyListObject *)self;
636
4.38M
    PyObject *ret = NULL;
637
4.38M
    Py_BEGIN_CRITICAL_SECTION(v);
638
4.38M
    ret = list_repr_impl(v);
639
4.38M
    Py_END_CRITICAL_SECTION();
640
4.38M
    return ret;
641
4.38M
}
642
643
static Py_ssize_t
644
list_length(PyObject *a)
645
48.2M
{
646
48.2M
    return PyList_GET_SIZE(a);
647
48.2M
}
648
649
static int
650
list_contains(PyObject *aa, PyObject *el)
651
7.62k
{
652
653
34.1k
    for (Py_ssize_t i = 0; ; i++) {
654
34.1k
        PyObject *item = list_get_item_ref((PyListObject *)aa, i);
655
34.1k
        if (item == NULL) {
656
            // out-of-bounds
657
7.01k
            return 0;
658
7.01k
        }
659
27.1k
        int cmp = PyObject_RichCompareBool(item, el, Py_EQ);
660
27.1k
        Py_DECREF(item);
661
27.1k
        if (cmp != 0) {
662
612
            return cmp;
663
612
        }
664
27.1k
    }
665
0
    return 0;
666
7.62k
}
667
668
static PyObject *
669
list_item(PyObject *aa, Py_ssize_t i)
670
16.1M
{
671
16.1M
    PyListObject *a = (PyListObject *)aa;
672
16.1M
    if (!valid_index(i, PyList_GET_SIZE(a))) {
673
978
        PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
674
978
        return NULL;
675
978
    }
676
16.1M
    PyObject *item;
677
#ifdef Py_GIL_DISABLED
678
    item = list_get_item_ref(a, i);
679
    if (item == NULL) {
680
        PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
681
        return NULL;
682
    }
683
#else
684
16.1M
    item = Py_NewRef(a->ob_item[i]);
685
16.1M
#endif
686
16.1M
    return item;
687
16.1M
}
688
689
static PyObject *
690
list_slice_lock_held(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
691
2.15M
{
692
2.15M
    PyListObject *np;
693
2.15M
    PyObject **src, **dest;
694
2.15M
    Py_ssize_t i, len;
695
2.15M
    len = ihigh - ilow;
696
2.15M
    if (len <= 0) {
697
0
        return PyList_New(0);
698
0
    }
699
2.15M
    np = (PyListObject *) list_new_prealloc(len);
700
2.15M
    if (np == NULL)
701
0
        return NULL;
702
703
2.15M
    src = a->ob_item + ilow;
704
2.15M
    dest = np->ob_item;
705
32.9M
    for (i = 0; i < len; i++) {
706
30.8M
        PyObject *v = src[i];
707
30.8M
        dest[i] = Py_NewRef(v);
708
30.8M
    }
709
2.15M
    Py_SET_SIZE(np, len);
710
2.15M
    return (PyObject *)np;
711
2.15M
}
712
713
PyObject *
714
PyList_GetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
715
0
{
716
0
    if (!PyList_Check(a)) {
717
0
        PyErr_BadInternalCall();
718
0
        return NULL;
719
0
    }
720
0
    PyObject *ret;
721
0
    Py_BEGIN_CRITICAL_SECTION(a);
722
0
    if (ilow < 0) {
723
0
        ilow = 0;
724
0
    }
725
0
    else if (ilow > Py_SIZE(a)) {
726
0
        ilow = Py_SIZE(a);
727
0
    }
728
0
    if (ihigh < ilow) {
729
0
        ihigh = ilow;
730
0
    }
731
0
    else if (ihigh > Py_SIZE(a)) {
732
0
        ihigh = Py_SIZE(a);
733
0
    }
734
0
    ret = list_slice_lock_held((PyListObject *)a, ilow, ihigh);
735
0
    Py_END_CRITICAL_SECTION();
736
0
    return ret;
737
0
}
738
739
static PyObject *
740
list_concat_lock_held(PyListObject *a, PyListObject *b)
741
22.3M
{
742
22.3M
    Py_ssize_t size;
743
22.3M
    Py_ssize_t i;
744
22.3M
    PyObject **src, **dest;
745
22.3M
    PyListObject *np;
746
22.3M
    assert((size_t)Py_SIZE(a) + (size_t)Py_SIZE(b) < PY_SSIZE_T_MAX);
747
22.3M
    size = Py_SIZE(a) + Py_SIZE(b);
748
22.3M
    if (size == 0) {
749
8.86M
        return PyList_New(0);
750
8.86M
    }
751
13.4M
    np = (PyListObject *) list_new_prealloc(size);
752
13.4M
    if (np == NULL) {
753
0
        return NULL;
754
0
    }
755
13.4M
    src = a->ob_item;
756
13.4M
    dest = np->ob_item;
757
486M
    for (i = 0; i < Py_SIZE(a); i++) {
758
472M
        PyObject *v = src[i];
759
472M
        dest[i] = Py_NewRef(v);
760
472M
    }
761
13.4M
    src = b->ob_item;
762
13.4M
    dest = np->ob_item + Py_SIZE(a);
763
214M
    for (i = 0; i < Py_SIZE(b); i++) {
764
200M
        PyObject *v = src[i];
765
200M
        dest[i] = Py_NewRef(v);
766
200M
    }
767
13.4M
    Py_SET_SIZE(np, size);
768
13.4M
    return (PyObject *)np;
769
13.4M
}
770
771
static PyObject *
772
list_concat(PyObject *aa, PyObject *bb)
773
22.3M
{
774
22.3M
    if (!PyList_Check(bb)) {
775
0
        PyErr_Format(PyExc_TypeError,
776
0
                  "can only concatenate list (not \"%.200s\") to list",
777
0
                  Py_TYPE(bb)->tp_name);
778
0
        return NULL;
779
0
    }
780
22.3M
    PyListObject *a = (PyListObject *)aa;
781
22.3M
    PyListObject *b = (PyListObject *)bb;
782
22.3M
    PyObject *ret;
783
22.3M
    Py_BEGIN_CRITICAL_SECTION2(a, b);
784
22.3M
    ret = list_concat_lock_held(a, b);
785
22.3M
    Py_END_CRITICAL_SECTION2();
786
22.3M
    return ret;
787
22.3M
}
788
789
static PyObject *
790
list_repeat_lock_held(PyListObject *a, Py_ssize_t n)
791
14.6k
{
792
14.6k
    const Py_ssize_t input_size = Py_SIZE(a);
793
14.6k
    if (input_size == 0 || n <= 0)
794
2.23k
        return PyList_New(0);
795
14.6k
    assert(n > 0);
796
797
12.4k
    if (input_size > PY_SSIZE_T_MAX / n)
798
0
        return PyErr_NoMemory();
799
12.4k
    Py_ssize_t output_size = input_size * n;
800
801
12.4k
    PyListObject *np = (PyListObject *) list_new_prealloc(output_size);
802
12.4k
    if (np == NULL)
803
0
        return NULL;
804
805
12.4k
    PyObject **dest = np->ob_item;
806
12.4k
    if (input_size == 1) {
807
12.4k
        PyObject *elem = a->ob_item[0];
808
12.4k
        _Py_RefcntAdd(elem, n);
809
12.4k
        PyObject **dest_end = dest + output_size;
810
11.0M
        while (dest < dest_end) {
811
10.9M
            *dest++ = elem;
812
10.9M
        }
813
12.4k
    }
814
0
    else {
815
0
        PyObject **src = a->ob_item;
816
0
        PyObject **src_end = src + input_size;
817
0
        while (src < src_end) {
818
0
            _Py_RefcntAdd(*src, n);
819
0
            *dest++ = *src++;
820
0
        }
821
        // TODO: _Py_memory_repeat calls are not safe for shared lists in
822
        // GIL_DISABLED builds. (See issue #129069)
823
0
        _Py_memory_repeat((char *)np->ob_item, sizeof(PyObject *)*output_size,
824
0
                                        sizeof(PyObject *)*input_size);
825
0
    }
826
827
12.4k
    Py_SET_SIZE(np, output_size);
828
12.4k
    return (PyObject *) np;
829
12.4k
}
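
The (here uncovered) multi-item branch above relies on _Py_memory_repeat(), an internal helper that fills the destination by repeatedly doubling the already-copied prefix. A standalone sketch of that technique on plain bytes (the name memory_repeat() is a re-implementation for illustration, not the CPython symbol):

#include <stdio.h>
#include <string.h>

static void memory_repeat(char *dest, size_t len_dest, size_t len_src)
{
    /* Precondition: the first len_src bytes of dest are already filled. */
    size_t copied = len_src;
    while (copied < len_dest) {
        size_t chunk = copied < len_dest - copied ? copied : len_dest - copied;
        memcpy(dest + copied, dest, chunk);   /* double the filled prefix */
        copied += chunk;
    }
}

int main(void)
{
    char buf[3 * 7 + 1];

    memcpy(buf, "abc", 3);             /* first copy of the input */
    memory_repeat(buf, 3 * 7, 3);      /* repeat it 7 times in total */
    buf[3 * 7] = '\0';
    printf("%s\n", buf);               /* abcabcabcabcabcabcabc */
    return 0;
}

Doubling means an n-fold repeat needs only about log2(n) memcpy calls instead of n, which is why the list code prefers it to a per-copy loop.
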
830
831
static PyObject *
832
list_repeat(PyObject *aa, Py_ssize_t n)
833
14.6k
{
834
14.6k
    PyObject *ret;
835
14.6k
    PyListObject *a = (PyListObject *)aa;
836
14.6k
    Py_BEGIN_CRITICAL_SECTION(a);
837
14.6k
    ret = list_repeat_lock_held(a, n);
838
14.6k
    Py_END_CRITICAL_SECTION();
839
14.6k
    return ret;
840
14.6k
}
841
842
static void
843
list_clear_impl(PyListObject *a, bool is_resize)
844
13.2M
{
845
13.2M
    PyObject **items = a->ob_item;
846
13.2M
    if (items == NULL) {
847
0
        return;
848
0
    }
849
850
    /* Because XDECREF can recursively invoke operations on
851
       this list, we make it empty first. */
852
13.2M
    Py_ssize_t i = Py_SIZE(a);
853
13.2M
    Py_SET_SIZE(a, 0);
854
13.2M
    FT_ATOMIC_STORE_PTR_RELEASE(a->ob_item, NULL);
855
13.2M
    a->allocated = 0;
856
26.6M
    while (--i >= 0) {
857
13.3M
        Py_XDECREF(items[i]);
858
13.3M
    }
859
#ifdef Py_GIL_DISABLED
860
    if (is_resize) {
861
        ensure_shared_on_resize(a);
862
    }
863
    bool use_qsbr = is_resize && _PyObject_GC_IS_SHARED(a);
864
#else
865
13.2M
    bool use_qsbr = false;
866
13.2M
#endif
867
13.2M
    free_list_items(items, use_qsbr);
868
    // Note that there is no guarantee that the list is actually empty
869
    // at this point, because XDECREF may have populated it indirectly again!
870
13.2M
}
871
872
static void
873
list_clear(PyListObject *a)
874
13.2M
{
875
13.2M
    list_clear_impl(a, true);
876
13.2M
}
877
878
static int
879
list_clear_slot(PyObject *self)
880
0
{
881
0
    list_clear_impl((PyListObject *)self, false);
882
0
    return 0;
883
0
}
884
885
/* a[ilow:ihigh] = v if v != NULL.
886
 * del a[ilow:ihigh] if v == NULL.
887
 *
888
 * Special speed gimmick:  when v is NULL and ihigh - ilow <= 8, it's
889
 * guaranteed the call cannot fail.
890
 */
891
static int
892
list_ass_slice_lock_held(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
893
4.63M
{
894
    /* Because [X]DECREF can recursively invoke list operations on
895
       this list, we must postpone all [X]DECREF activity until
896
       after the list is back in its canonical shape.  Therefore
897
       we must allocate an additional array, 'recycle', into which
898
       we temporarily copy the items that are deleted from the
899
       list. :-( */
900
4.63M
    PyObject *recycle_on_stack[8];
901
4.63M
    PyObject **recycle = recycle_on_stack; /* will allocate more if needed */
902
4.63M
    PyObject **item;
903
4.63M
    PyObject **vitem = NULL;
904
4.63M
    PyObject *v_as_SF = NULL; /* PySequence_Fast(v) */
905
4.63M
    Py_ssize_t n; /* # of elements in replacement list */
906
4.63M
    Py_ssize_t norig; /* # of elements in list getting replaced */
907
4.63M
    Py_ssize_t d; /* Change in size */
908
4.63M
    Py_ssize_t k;
909
4.63M
    size_t s;
910
4.63M
    int result = -1;            /* guilty until proved innocent */
911
4.63M
#define b ((PyListObject *)v)
912
4.63M
    if (v == NULL)
913
4.40M
        n = 0;
914
233k
    else {
915
233k
        v_as_SF = PySequence_Fast(v, "can only assign an iterable");
916
233k
        if(v_as_SF == NULL)
917
0
            goto Error;
918
233k
        n = PySequence_Fast_GET_SIZE(v_as_SF);
919
233k
        vitem = PySequence_Fast_ITEMS(v_as_SF);
920
233k
    }
921
4.63M
    if (ilow < 0)
922
0
        ilow = 0;
923
4.63M
    else if (ilow > Py_SIZE(a))
924
0
        ilow = Py_SIZE(a);
925
926
4.63M
    if (ihigh < ilow)
927
0
        ihigh = ilow;
928
4.63M
    else if (ihigh > Py_SIZE(a))
929
0
        ihigh = Py_SIZE(a);
930
931
4.63M
    norig = ihigh - ilow;
932
4.63M
    assert(norig >= 0);
933
4.63M
    d = n - norig;
934
4.63M
    if (Py_SIZE(a) + d == 0) {
935
629k
        Py_XDECREF(v_as_SF);
936
629k
        list_clear(a);
937
629k
        return 0;
938
629k
    }
939
4.00M
    item = a->ob_item;
940
    /* recycle the items that we are about to remove */
941
4.00M
    s = norig * sizeof(PyObject *);
942
    /* If norig == 0, item might be NULL, in which case we may not memcpy from it. */
943
4.00M
    if (s) {
944
3.78M
        if (s > sizeof(recycle_on_stack)) {
945
127
            recycle = (PyObject **)PyMem_Malloc(s);
946
127
            if (recycle == NULL) {
947
0
                PyErr_NoMemory();
948
0
                goto Error;
949
0
            }
950
127
        }
951
3.78M
        memcpy(recycle, &item[ilow], s);
952
3.78M
    }
953
954
4.00M
    if (d < 0) { /* Delete -d items */
955
3.78M
        Py_ssize_t tail;
956
3.78M
        tail = (Py_SIZE(a) - ihigh) * sizeof(PyObject *);
957
        // TODO: these memmove/memcpy calls are not safe for shared lists in
958
        // GIL_DISABLED builds. (See issue #129069)
959
3.78M
        memmove(&item[ihigh+d], &item[ihigh], tail);
960
3.78M
        if (list_resize(a, Py_SIZE(a) + d) < 0) {
961
0
            memmove(&item[ihigh], &item[ihigh+d], tail);
962
0
            memcpy(&item[ilow], recycle, s);
963
0
            goto Error;
964
0
        }
965
3.78M
        item = a->ob_item;
966
3.78M
    }
967
218k
    else if (d > 0) { /* Insert d items */
968
218k
        k = Py_SIZE(a);
969
218k
        if (list_resize(a, k+d) < 0)
970
0
            goto Error;
971
218k
        item = a->ob_item;
972
        // TODO: these memmove/memcpy calls are not safe for shared lists in
973
        // GIL_DISABLED builds. (See issue #129069)
974
218k
        memmove(&item[ihigh+d], &item[ihigh],
975
218k
            (k - ihigh)*sizeof(PyObject *));
976
218k
    }
977
4.22M
    for (k = 0; k < n; k++, ilow++) {
978
218k
        PyObject *w = vitem[k];
979
218k
        FT_ATOMIC_STORE_PTR_RELEASE(item[ilow], Py_XNewRef(w));
980
218k
    }
981
7.80M
    for (k = norig - 1; k >= 0; --k)
982
3.79M
        Py_XDECREF(recycle[k]);
983
4.00M
    result = 0;
984
4.00M
 Error:
985
4.00M
    if (recycle != recycle_on_stack)
986
127
        PyMem_Free(recycle);
987
4.00M
    Py_XDECREF(v_as_SF);
988
4.00M
    return result;
989
4.00M
#undef b
990
4.00M
}
991
992
static int
993
list_ass_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
994
4.39M
{
995
4.39M
    int ret;
996
4.39M
    if (a == (PyListObject *)v) {
997
0
        Py_BEGIN_CRITICAL_SECTION(a);
998
0
        Py_ssize_t n = PyList_GET_SIZE(a);
999
0
        PyObject *copy = list_slice_lock_held(a, 0, n);
1000
0
        if (copy == NULL) {
1001
0
            ret = -1;
1002
0
        }
1003
0
        else {
1004
0
            ret = list_ass_slice_lock_held(a, ilow, ihigh, copy);
1005
0
            Py_DECREF(copy);
1006
0
        }
1007
0
        Py_END_CRITICAL_SECTION();
1008
0
    }
1009
4.39M
    else if (v != NULL && PyList_CheckExact(v)) {
1010
254
        Py_BEGIN_CRITICAL_SECTION2(a, v);
1011
254
        ret = list_ass_slice_lock_held(a, ilow, ihigh, v);
1012
254
        Py_END_CRITICAL_SECTION2();
1013
254
    }
1014
4.39M
    else {
1015
4.39M
        Py_BEGIN_CRITICAL_SECTION(a);
1016
4.39M
        ret = list_ass_slice_lock_held(a, ilow, ihigh, v);
1017
4.39M
        Py_END_CRITICAL_SECTION();
1018
4.39M
    }
1019
4.39M
    return ret;
1020
4.39M
}
1021
1022
int
1023
PyList_SetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
1024
4.39M
{
1025
4.39M
    if (!PyList_Check(a)) {
1026
0
        PyErr_BadInternalCall();
1027
0
        return -1;
1028
0
    }
1029
4.39M
    return list_ass_slice((PyListObject *)a, ilow, ihigh, v);
1030
4.39M
}
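
A hedged usage sketch of PyList_SetSlice() (not from the instrumented source): passing a replacement sequence assigns a[low:high], and passing NULL deletes the slice, which is the v == NULL path that dominates the counts above.

#include <Python.h>

int main(void)
{
    Py_Initialize();

    PyObject *list = Py_BuildValue("[iiiii]", 0, 1, 2, 3, 4);
    PyObject *repl = Py_BuildValue("[ii]", 8, 9);
    int rc = 1;

    if (list != NULL && repl != NULL
        && PyList_SetSlice(list, 1, 3, repl) == 0    /* a[1:3] = [8, 9] */
        && PyList_SetSlice(list, 0, 1, NULL) == 0)   /* del a[0:1] */
    {
        PyObject_Print(list, stdout, 0);             /* [8, 9, 3, 4] */
        printf("\n");
        rc = 0;
    }
    if (rc != 0) {
        PyErr_Print();
    }
    Py_XDECREF(repl);
    Py_XDECREF(list);
    return Py_FinalizeEx() < 0 ? 1 : rc;
}
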
1031
1032
static int
1033
list_inplace_repeat_lock_held(PyListObject *self, Py_ssize_t n)
1034
0
{
1035
0
    Py_ssize_t input_size = PyList_GET_SIZE(self);
1036
0
    if (input_size == 0 || n == 1) {
1037
0
        return 0;
1038
0
    }
1039
1040
0
    if (n < 1) {
1041
0
        list_clear(self);
1042
0
        return 0;
1043
0
    }
1044
1045
0
    if (input_size > PY_SSIZE_T_MAX / n) {
1046
0
        PyErr_NoMemory();
1047
0
        return -1;
1048
0
    }
1049
0
    Py_ssize_t output_size = input_size * n;
1050
1051
0
    if (list_resize(self, output_size) < 0) {
1052
0
        return -1;
1053
0
    }
1054
1055
0
    PyObject **items = self->ob_item;
1056
0
    for (Py_ssize_t j = 0; j < input_size; j++) {
1057
0
        _Py_RefcntAdd(items[j], n-1);
1058
0
    }
1059
    // TODO: _Py_memory_repeat calls are not safe for shared lists in
1060
    // GIL_DISABLED builds. (See issue #129069)
1061
0
    _Py_memory_repeat((char *)items, sizeof(PyObject *)*output_size,
1062
0
                      sizeof(PyObject *)*input_size);
1063
0
    return 0;
1064
0
}
1065
1066
static PyObject *
1067
list_inplace_repeat(PyObject *_self, Py_ssize_t n)
1068
0
{
1069
0
    PyObject *ret;
1070
0
    PyListObject *self = (PyListObject *) _self;
1071
0
    Py_BEGIN_CRITICAL_SECTION(self);
1072
0
    if (list_inplace_repeat_lock_held(self, n) < 0) {
1073
0
        ret = NULL;
1074
0
    }
1075
0
    else {
1076
0
        ret = Py_NewRef(self);
1077
0
    }
1078
0
    Py_END_CRITICAL_SECTION();
1079
0
    return ret;
1080
0
}
1081
1082
static int
1083
list_ass_item_lock_held(PyListObject *a, Py_ssize_t i, PyObject *v)
1084
9.66k
{
1085
9.66k
    if (!valid_index(i, Py_SIZE(a))) {
1086
0
        PyErr_SetString(PyExc_IndexError,
1087
0
                        "list assignment index out of range");
1088
0
        return -1;
1089
0
    }
1090
9.66k
    PyObject *tmp = a->ob_item[i];
1091
9.66k
    if (v == NULL) {
1092
6.62k
        Py_ssize_t size = Py_SIZE(a);
1093
6.62k
        for (Py_ssize_t idx = i; idx < size - 1; idx++) {
1094
0
            FT_ATOMIC_STORE_PTR_RELAXED(a->ob_item[idx], a->ob_item[idx + 1]);
1095
0
        }
1096
6.62k
        Py_SET_SIZE(a, size - 1);
1097
6.62k
    }
1098
3.04k
    else {
1099
3.04k
        FT_ATOMIC_STORE_PTR_RELEASE(a->ob_item[i], Py_NewRef(v));
1100
3.04k
    }
1101
9.66k
    Py_DECREF(tmp);
1102
9.66k
    return 0;
1103
9.66k
}
1104
1105
static int
1106
list_ass_item(PyObject *aa, Py_ssize_t i, PyObject *v)
1107
6.54k
{
1108
6.54k
    int ret;
1109
6.54k
    PyListObject *a = (PyListObject *)aa;
1110
6.54k
    Py_BEGIN_CRITICAL_SECTION(a);
1111
6.54k
    ret = list_ass_item_lock_held(a, i, v);
1112
6.54k
    Py_END_CRITICAL_SECTION();
1113
6.54k
    return ret;
1114
6.54k
}
1115
1116
/*[clinic input]
1117
@critical_section
1118
list.insert
1119
1120
    index: Py_ssize_t
1121
    object: object
1122
    /
1123
1124
Insert object before index.
1125
[clinic start generated code]*/
1126
1127
static PyObject *
1128
list_insert_impl(PyListObject *self, Py_ssize_t index, PyObject *object)
1129
/*[clinic end generated code: output=7f35e32f60c8cb78 input=b1987ca998a4ae2d]*/
1130
60
{
1131
60
    if (ins1(self, index, object) == 0) {
1132
60
        Py_RETURN_NONE;
1133
60
    }
1134
0
    return NULL;
1135
60
}
1136
1137
/*[clinic input]
1138
@critical_section
1139
list.clear as py_list_clear
1140
1141
Remove all items from list.
1142
[clinic start generated code]*/
1143
1144
static PyObject *
1145
py_list_clear_impl(PyListObject *self)
1146
/*[clinic end generated code: output=83726743807e3518 input=e285b7f09051a9ba]*/
1147
172
{
1148
172
    list_clear(self);
1149
172
    Py_RETURN_NONE;
1150
172
}
1151
1152
/*[clinic input]
1153
@critical_section
1154
list.copy
1155
1156
Return a shallow copy of the list.
1157
[clinic start generated code]*/
1158
1159
static PyObject *
1160
list_copy_impl(PyListObject *self)
1161
/*[clinic end generated code: output=ec6b72d6209d418e input=81c54b0c7bb4f73d]*/
1162
0
{
1163
0
    return list_slice_lock_held(self, 0, Py_SIZE(self));
1164
0
}
1165
1166
/*[clinic input]
1167
@critical_section
1168
list.append
1169
1170
     object: object
1171
     /
1172
1173
Append object to the end of the list.
1174
[clinic start generated code]*/
1175
1176
static PyObject *
1177
list_append_impl(PyListObject *self, PyObject *object)
1178
/*[clinic end generated code: output=78423561d92ed405 input=122b0853de54004f]*/
1179
21.5M
{
1180
21.5M
    if (_PyList_AppendTakeRef(self, Py_NewRef(object)) < 0) {
1181
0
        return NULL;
1182
0
    }
1183
21.5M
    Py_RETURN_NONE;
1184
21.5M
}
1185
1186
static int
1187
list_extend_fast(PyListObject *self, PyObject *iterable)
1188
22.6M
{
1189
22.6M
    Py_ssize_t n = PySequence_Fast_GET_SIZE(iterable);
1190
22.6M
    if (n == 0) {
1191
        /* short circuit when iterable is empty */
1192
9.69M
        return 0;
1193
9.69M
    }
1194
1195
12.9M
    Py_ssize_t m = Py_SIZE(self);
1196
    // It should not be possible to allocate a list large enough to cause
1197
    // an overflow on any relevant platform.
1198
12.9M
    assert(m < PY_SSIZE_T_MAX - n);
1199
12.9M
    if (self->ob_item == NULL) {
1200
1.44M
        if (list_preallocate_exact(self, n) < 0) {
1201
0
            return -1;
1202
0
        }
1203
1.44M
        Py_SET_SIZE(self, n);
1204
1.44M
    }
1205
11.5M
    else if (list_resize(self, m + n) < 0) {
1206
0
        return -1;
1207
0
    }
1208
1209
    // note that we may still have self == iterable here for the
1210
    // situation a.extend(a), but the following code works
1211
    // in that case too.  Just make sure to resize self
1212
    // before calling PySequence_Fast_ITEMS.
1213
    //
1214
    // populate the end of self with iterable's items.
1215
12.9M
    PyObject **src = PySequence_Fast_ITEMS(iterable);
1216
12.9M
    PyObject **dest = self->ob_item + m;
1217
41.7M
    for (Py_ssize_t i = 0; i < n; i++) {
1218
28.7M
        PyObject *o = src[i];
1219
28.7M
        FT_ATOMIC_STORE_PTR_RELEASE(dest[i], Py_NewRef(o));
1220
28.7M
    }
1221
12.9M
    return 0;
1222
12.9M
}
1223
1224
static int
1225
list_extend_iter_lock_held(PyListObject *self, PyObject *iterable)
1226
6.13M
{
1227
6.13M
    PyObject *it = PyObject_GetIter(iterable);
1228
6.13M
    if (it == NULL) {
1229
0
        return -1;
1230
0
    }
1231
6.13M
    PyObject *(*iternext)(PyObject *) = *Py_TYPE(it)->tp_iternext;
1232
1233
    /* Guess a result list size. */
1234
6.13M
    Py_ssize_t n = PyObject_LengthHint(iterable, 8);
1235
6.13M
    if (n < 0) {
1236
0
        Py_DECREF(it);
1237
0
        return -1;
1238
0
    }
1239
1240
6.13M
    Py_ssize_t m = Py_SIZE(self);
1241
6.13M
    if (m > PY_SSIZE_T_MAX - n) {
1242
        /* m + n overflowed; on the chance that n lied, and there really
1243
         * is enough room, ignore it.  If n was telling the truth, we'll
1244
         * eventually run out of memory during the loop.
1245
         */
1246
0
    }
1247
6.13M
    else if (self->ob_item == NULL) {
1248
5.75M
        if (n && list_preallocate_exact(self, n) < 0)
1249
0
            goto error;
1250
5.75M
    }
1251
384k
    else {
1252
        /* Make room. */
1253
384k
        if (list_resize(self, m + n) < 0) {
1254
0
            goto error;
1255
0
        }
1256
1257
        /* Make the list sane again. */
1258
384k
        Py_SET_SIZE(self, m);
1259
384k
    }
1260
1261
    /* Run iterator to exhaustion. */
1262
65.8M
    for (;;) {
1263
65.8M
        PyObject *item = iternext(it);
1264
65.8M
        if (item == NULL) {
1265
6.13M
            if (PyErr_Occurred()) {
1266
653
                if (PyErr_ExceptionMatches(PyExc_StopIteration))
1267
0
                    PyErr_Clear();
1268
653
                else
1269
653
                    goto error;
1270
653
            }
1271
6.13M
            break;
1272
6.13M
        }
1273
1274
59.7M
        if (Py_SIZE(self) < self->allocated) {
1275
58.7M
            Py_ssize_t len = Py_SIZE(self);
1276
58.7M
            FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item[len], item);  // steals item ref
1277
58.7M
            Py_SET_SIZE(self, len + 1);
1278
58.7M
        }
1279
1.01M
        else {
1280
1.01M
            if (_PyList_AppendTakeRef(self, item) < 0)
1281
0
                goto error;
1282
1.01M
        }
1283
59.7M
    }
1284
1285
    /* Cut back result list if initial guess was too large. */
1286
6.13M
    if (Py_SIZE(self) < self->allocated) {
1287
4.70M
        if (list_resize(self, Py_SIZE(self)) < 0)
1288
0
            goto error;
1289
4.70M
    }
1290
1291
6.13M
    Py_DECREF(it);
1292
6.13M
    return 0;
1293
1294
653
  error:
1295
653
    Py_DECREF(it);
1296
653
    return -1;
1297
6.13M
}
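
The iterator path above boils down to: take a length hint, run the iterator to exhaustion, and distinguish exhaustion from failure. A hedged sketch of the same pattern using only public C API calls (PyObject_LengthHint(), PyObject_GetIter(), PyIter_Next(); the helper collect_into_list() is invented for the demo, and the hint is only reported here, since exact preallocation is a private detail):

#include <Python.h>

static PyObject *
collect_into_list(PyObject *iterable)
{
    Py_ssize_t hint = PyObject_LengthHint(iterable, 8);   /* a guess only */
    if (hint < 0) {
        return NULL;
    }
    printf("length hint: %zd\n", hint);

    PyObject *it = PyObject_GetIter(iterable);
    if (it == NULL) {
        return NULL;
    }
    PyObject *list = PyList_New(0);
    if (list == NULL) {
        Py_DECREF(it);
        return NULL;
    }
    PyObject *item;
    while ((item = PyIter_Next(it)) != NULL) {
        int rc = PyList_Append(list, item);
        Py_DECREF(item);
        if (rc < 0) {
            goto error;
        }
    }
    if (PyErr_Occurred()) {          /* NULL meant an error, not exhaustion */
        goto error;
    }
    Py_DECREF(it);
    return list;

error:
    Py_DECREF(it);
    Py_DECREF(list);
    return NULL;
}

int main(void)
{
    Py_Initialize();
    PyObject *source = Py_BuildValue("(iii)", 1, 2, 3);
    PyObject *list = source ? collect_into_list(source) : NULL;
    if (list == NULL) {
        PyErr_Print();
    }
    else {
        PyObject_Print(list, stdout, 0);   /* [1, 2, 3] */
        printf("\n");
    }
    Py_XDECREF(list);
    Py_XDECREF(source);
    return Py_FinalizeEx() < 0 ? 1 : 0;
}
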
1298
1299
static int
1300
list_extend_lock_held(PyListObject *self, PyObject *iterable)
1301
22.6M
{
1302
22.6M
    PyObject *seq = PySequence_Fast(iterable, "argument must be iterable");
1303
22.6M
    if (!seq) {
1304
0
        return -1;
1305
0
    }
1306
1307
22.6M
    int res = list_extend_fast(self, seq);
1308
22.6M
    Py_DECREF(seq);
1309
22.6M
    return res;
1310
22.6M
}
1311
1312
static int
1313
list_extend_set(PyListObject *self, PySetObject *other)
1314
20.3k
{
1315
20.3k
    Py_ssize_t m = Py_SIZE(self);
1316
20.3k
    Py_ssize_t n = PySet_GET_SIZE(other);
1317
20.3k
    Py_ssize_t r = m + n;
1318
20.3k
    if (r == 0) {
1319
867
        return 0;
1320
867
    }
1321
19.4k
    if (list_resize(self, r) < 0) {
1322
0
        return -1;
1323
0
    }
1324
1325
19.4k
    assert(self->ob_item != NULL);
1326
    /* populate the end of self with iterable's items */
1327
19.4k
    Py_ssize_t setpos = 0;
1328
19.4k
    Py_hash_t hash;
1329
19.4k
    PyObject *key;
1330
19.4k
    PyObject **dest = self->ob_item + m;
1331
123k
    while (_PySet_NextEntryRef((PyObject *)other, &setpos, &key, &hash)) {
1332
103k
        FT_ATOMIC_STORE_PTR_RELEASE(*dest, key);
1333
103k
        dest++;
1334
103k
    }
1335
19.4k
    Py_SET_SIZE(self, r);
1336
19.4k
    return 0;
1337
19.4k
}
1338
1339
static int
1340
list_extend_dict(PyListObject *self, PyDictObject *dict, int which_item)
1341
311
{
1342
    // which_item: 0 for keys and 1 for values
1343
311
    Py_ssize_t m = Py_SIZE(self);
1344
311
    Py_ssize_t n = PyDict_GET_SIZE(dict);
1345
311
    Py_ssize_t r = m + n;
1346
311
    if (r == 0) {
1347
0
        return 0;
1348
0
    }
1349
311
    if (list_resize(self, r) < 0) {
1350
0
        return -1;
1351
0
    }
1352
1353
311
    assert(self->ob_item != NULL);
1354
311
    PyObject **dest = self->ob_item + m;
1355
311
    Py_ssize_t pos = 0;
1356
311
    PyObject *keyvalue[2];
1357
1.69k
    while (_PyDict_Next((PyObject *)dict, &pos, &keyvalue[0], &keyvalue[1], NULL)) {
1358
1.38k
        PyObject *obj = keyvalue[which_item];
1359
1.38k
        Py_INCREF(obj);
1360
1.38k
        FT_ATOMIC_STORE_PTR_RELEASE(*dest, obj);
1361
1.38k
        dest++;
1362
1.38k
    }
1363
1364
311
    Py_SET_SIZE(self, r);
1365
311
    return 0;
1366
311
}
1367
1368
static int
1369
list_extend_dictitems(PyListObject *self, PyDictObject *dict)
1370
0
{
1371
0
    Py_ssize_t m = Py_SIZE(self);
1372
0
    Py_ssize_t n = PyDict_GET_SIZE(dict);
1373
0
    Py_ssize_t r = m + n;
1374
0
    if (r == 0) {
1375
0
        return 0;
1376
0
    }
1377
0
    if (list_resize(self, r) < 0) {
1378
0
        return -1;
1379
0
    }
1380
1381
0
    assert(self->ob_item != NULL);
1382
0
    PyObject **dest = self->ob_item + m;
1383
0
    Py_ssize_t pos = 0;
1384
0
    Py_ssize_t i = 0;
1385
0
    PyObject *key, *value;
1386
0
    while (_PyDict_Next((PyObject *)dict, &pos, &key, &value, NULL)) {
1387
0
        PyObject *item = PyTuple_Pack(2, key, value);
1388
0
        if (item == NULL) {
1389
0
            Py_SET_SIZE(self, m + i);
1390
0
            return -1;
1391
0
        }
1392
0
        FT_ATOMIC_STORE_PTR_RELEASE(*dest, item);
1393
0
        dest++;
1394
0
        i++;
1395
0
    }
1396
1397
0
    Py_SET_SIZE(self, r);
1398
0
    return 0;
1399
0
}
1400
1401
static int
1402
_list_extend(PyListObject *self, PyObject *iterable)
1403
28.8M
{
1404
    // Special case:
1405
    // lists and tuples which can use PySequence_Fast ops
1406
28.8M
    int res = -1;
1407
28.8M
    if ((PyObject *)self == iterable) {
1408
0
        Py_BEGIN_CRITICAL_SECTION(self);
1409
0
        res = list_inplace_repeat_lock_held(self, 2);
1410
0
        Py_END_CRITICAL_SECTION();
1411
0
    }
1412
28.8M
    else if (PyList_CheckExact(iterable)) {
1413
11.1M
        Py_BEGIN_CRITICAL_SECTION2(self, iterable);
1414
11.1M
        res = list_extend_lock_held(self, iterable);
1415
11.1M
        Py_END_CRITICAL_SECTION2();
1416
11.1M
    }
1417
17.7M
    else if (PyTuple_CheckExact(iterable)) {
1418
11.5M
        Py_BEGIN_CRITICAL_SECTION(self);
1419
11.5M
        res = list_extend_lock_held(self, iterable);
1420
11.5M
        Py_END_CRITICAL_SECTION();
1421
11.5M
    }
1422
6.15M
    else if (PyAnySet_CheckExact(iterable)) {
1423
20.3k
        Py_BEGIN_CRITICAL_SECTION2(self, iterable);
1424
20.3k
        res = list_extend_set(self, (PySetObject *)iterable);
1425
20.3k
        Py_END_CRITICAL_SECTION2();
1426
20.3k
    }
1427
6.13M
    else if (PyDict_CheckExact(iterable)) {
1428
309
        Py_BEGIN_CRITICAL_SECTION2(self, iterable);
1429
309
        res = list_extend_dict(self, (PyDictObject *)iterable, 0 /*keys*/);
1430
309
        Py_END_CRITICAL_SECTION2();
1431
309
    }
1432
6.13M
    else if (Py_IS_TYPE(iterable, &PyDictKeys_Type)) {
1433
0
        PyDictObject *dict = ((_PyDictViewObject *)iterable)->dv_dict;
1434
0
        Py_BEGIN_CRITICAL_SECTION2(self, dict);
1435
0
        res = list_extend_dict(self, dict, 0 /*keys*/);
1436
0
        Py_END_CRITICAL_SECTION2();
1437
0
    }
1438
6.13M
    else if (Py_IS_TYPE(iterable, &PyDictValues_Type)) {
1439
2
        PyDictObject *dict = ((_PyDictViewObject *)iterable)->dv_dict;
1440
2
        Py_BEGIN_CRITICAL_SECTION2(self, dict);
1441
2
        res = list_extend_dict(self, dict, 1 /*values*/);
1442
2
        Py_END_CRITICAL_SECTION2();
1443
2
    }
1444
6.13M
    else if (Py_IS_TYPE(iterable, &PyDictItems_Type)) {
1445
0
        PyDictObject *dict = ((_PyDictViewObject *)iterable)->dv_dict;
1446
0
        Py_BEGIN_CRITICAL_SECTION2(self, dict);
1447
0
        res = list_extend_dictitems(self, dict);
1448
0
        Py_END_CRITICAL_SECTION2();
1449
0
    }
1450
6.13M
    else {
1451
6.13M
        Py_BEGIN_CRITICAL_SECTION(self);
1452
6.13M
        res = list_extend_iter_lock_held(self, iterable);
1453
6.13M
        Py_END_CRITICAL_SECTION();
1454
6.13M
    }
1455
28.8M
    return res;
1456
28.8M
}
1457
1458
/*[clinic input]
1459
list.extend as list_extend
1460
1461
     iterable: object
1462
     /
1463
1464
Extend list by appending elements from the iterable.
1465
[clinic start generated code]*/
1466
1467
static PyObject *
1468
list_extend_impl(PyListObject *self, PyObject *iterable)
1469
/*[clinic end generated code: output=b0eba9e0b186d5ce input=979da7597a515791]*/
1470
18.5M
{
1471
18.5M
    if (_list_extend(self, iterable) < 0) {
1472
653
        return NULL;
1473
653
    }
1474
18.5M
    Py_RETURN_NONE;
1475
18.5M
}
1476
1477
PyObject *
1478
_PyList_Extend(PyListObject *self, PyObject *iterable)
1479
17.2M
{
1480
17.2M
    return list_extend((PyObject*)self, iterable);
1481
17.2M
}
1482
1483
int
1484
PyList_Extend(PyObject *self, PyObject *iterable)
1485
0
{
1486
0
    if (!PyList_Check(self)) {
1487
0
        PyErr_BadInternalCall();
1488
0
        return -1;
1489
0
    }
1490
0
    return _list_extend((PyListObject*)self, iterable);
1491
0
}
1492
1493
1494
int
1495
PyList_Clear(PyObject *self)
1496
0
{
1497
0
    if (!PyList_Check(self)) {
1498
0
        PyErr_BadInternalCall();
1499
0
        return -1;
1500
0
    }
1501
0
    Py_BEGIN_CRITICAL_SECTION(self);
1502
0
    list_clear((PyListObject*)self);
1503
0
    Py_END_CRITICAL_SECTION();
1504
0
    return 0;
1505
0
}
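
A hedged usage sketch of the two public wrappers above, PyList_Extend() and PyList_Clear() (not from the instrumented source; both entry points are comparatively recent C API additions, so a current CPython is assumed):

#include <Python.h>

int main(void)
{
    Py_Initialize();

    PyObject *list = Py_BuildValue("[i]", 1);
    PyObject *extra = Py_BuildValue("(ii)", 2, 3);
    int rc = 1;

    if (list != NULL && extra != NULL
        && PyList_Extend(list, extra) == 0)
    {
        PyObject_Print(list, stdout, 0);   /* [1, 2, 3] */
        printf("\n");
        if (PyList_Clear(list) == 0) {
            printf("len after clear: %zd\n", PyList_Size(list));   /* 0 */
            rc = 0;
        }
    }
    if (rc != 0) {
        PyErr_Print();
    }
    Py_XDECREF(extra);
    Py_XDECREF(list);
    return Py_FinalizeEx() < 0 ? 1 : rc;
}
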
1506
1507
1508
static PyObject *
1509
list_inplace_concat(PyObject *_self, PyObject *other)
1510
422
{
1511
422
    PyListObject *self = (PyListObject *)_self;
1512
422
    if (_list_extend(self, other) < 0) {
1513
0
        return NULL;
1514
0
    }
1515
422
    return Py_NewRef(self);
1516
422
}
1517
1518
/*[clinic input]
1519
@critical_section
1520
list.pop
1521
1522
    index: Py_ssize_t = -1
1523
    /
1524
1525
Remove and return item at index (default last).
1526
1527
Raises IndexError if list is empty or index is out of range.
1528
[clinic start generated code]*/
1529
1530
static PyObject *
1531
list_pop_impl(PyListObject *self, Py_ssize_t index)
1532
/*[clinic end generated code: output=6bd69dcb3f17eca8 input=c269141068ae4b8f]*/
1533
30.6M
{
1534
30.6M
    PyObject *v;
1535
30.6M
    int status;
1536
1537
30.6M
    if (Py_SIZE(self) == 0) {
1538
        /* Special-case most common failure cause */
1539
0
        PyErr_SetString(PyExc_IndexError, "pop from empty list");
1540
0
        return NULL;
1541
0
    }
1542
30.6M
    if (index < 0)
1543
12.8M
        index += Py_SIZE(self);
1544
30.6M
    if (!valid_index(index, Py_SIZE(self))) {
1545
0
        PyErr_SetString(PyExc_IndexError, "pop index out of range");
1546
0
        return NULL;
1547
0
    }
1548
1549
30.6M
    PyObject **items = self->ob_item;
1550
30.6M
    v = items[index];
1551
30.6M
    const Py_ssize_t size_after_pop = Py_SIZE(self) - 1;
1552
30.6M
    if (size_after_pop == 0) {
1553
12.6M
        Py_INCREF(v);
1554
12.6M
        list_clear(self);
1555
12.6M
        status = 0;
1556
12.6M
    }
1557
18.0M
    else {
1558
18.0M
        if ((size_after_pop - index) > 0) {
1559
11.0M
            memmove(&items[index], &items[index+1], (size_after_pop - index) * sizeof(PyObject *));
1560
11.0M
        }
1561
18.0M
        status = list_resize(self, size_after_pop);
1562
18.0M
    }
1563
30.6M
    if (status >= 0) {
1564
30.6M
        return v; // and v now owns the reference the list had
1565
30.6M
    }
1566
0
    else {
1567
        // list resize failed, need to restore
1568
0
        memmove(&items[index+1], &items[index], (size_after_pop - index)* sizeof(PyObject *));
1569
0
        items[index] = v;
1570
0
        return NULL;
1571
0
    }
1572
30.6M
}
1573
1574
/* Reverse a slice of a list in place, from lo up to (exclusive) hi. */
1575
static void
1576
reverse_slice(PyObject **lo, PyObject **hi)
1577
119k
{
1578
119k
    assert(lo && hi);
1579
1580
119k
    --hi;
1581
461k
    while (lo < hi) {
1582
342k
        PyObject *t = *lo;
1583
342k
        *lo = *hi;
1584
342k
        *hi = t;
1585
342k
        ++lo;
1586
342k
        --hi;
1587
342k
    }
1588
119k
}
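
A standalone sketch of the two-pointer, in-place reversal used by reverse_slice(), applied to a plain int array instead of PyObject* (the name reverse_slice_int() is invented for the demo):

#include <stdio.h>

static void reverse_slice_int(int *lo, int *hi)
{
    --hi;                       /* hi is exclusive; step back to last item */
    while (lo < hi) {
        int t = *lo;
        *lo = *hi;
        *hi = t;
        ++lo;
        --hi;
    }
}

int main(void)
{
    int a[] = { 1, 2, 3, 4, 5 };

    reverse_slice_int(a, a + 5);
    for (int i = 0; i < 5; i++) {
        printf("%d ", a[i]);    /* 5 4 3 2 1 */
    }
    printf("\n");
    return 0;
}
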
1589
1590
/* Lots of code for an adaptive, stable, natural mergesort.  There are many
1591
 * pieces to this algorithm; read listsort.txt for overviews and details.
1592
 */
1593
1594
/* A sortslice contains a pointer to an array of keys and a pointer to
1595
 * an array of corresponding values.  In other words, keys[i]
1596
 * corresponds with values[i].  If values == NULL, then the keys are
1597
 * also the values.
1598
 *
1599
 * Several convenience routines are provided here, so that keys and
1600
 * values are always moved in sync.
1601
 */
1602
1603
typedef struct {
1604
    PyObject **keys;
1605
    PyObject **values;
1606
} sortslice;
1607
1608
Py_LOCAL_INLINE(void)
1609
sortslice_copy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j)
1610
37.5k
{
1611
37.5k
    s1->keys[i] = s2->keys[j];
1612
37.5k
    if (s1->values != NULL)
1613
36.2k
        s1->values[i] = s2->values[j];
1614
37.5k
}
1615
1616
Py_LOCAL_INLINE(void)
1617
sortslice_copy_incr(sortslice *dst, sortslice *src)
1618
579k
{
1619
579k
    *dst->keys++ = *src->keys++;
1620
579k
    if (dst->values != NULL)
1621
340k
        *dst->values++ = *src->values++;
1622
579k
}
1623
1624
Py_LOCAL_INLINE(void)
1625
sortslice_copy_decr(sortslice *dst, sortslice *src)
1626
368k
{
1627
368k
    *dst->keys-- = *src->keys--;
1628
368k
    if (dst->values != NULL)
1629
292k
        *dst->values-- = *src->values--;
1630
368k
}
1631
1632
1633
Py_LOCAL_INLINE(void)
1634
sortslice_memcpy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
1635
                 Py_ssize_t n)
1636
213k
{
1637
213k
    memcpy(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
1638
213k
    if (s1->values != NULL)
1639
176k
        memcpy(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
1640
213k
}
1641
1642
Py_LOCAL_INLINE(void)
1643
sortslice_memmove(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
1644
                  Py_ssize_t n)
1645
146k
{
1646
146k
    memmove(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
1647
146k
    if (s1->values != NULL)
1648
117k
        memmove(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
1649
146k
}
1650
1651
Py_LOCAL_INLINE(void)
1652
sortslice_advance(sortslice *slice, Py_ssize_t n)
1653
864k
{
1654
864k
    slice->keys += n;
1655
864k
    if (slice->values != NULL)
1656
652k
        slice->values += n;
1657
864k
}
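
The sortslice helpers above exist so that the keys array and the optional parallel values array always move with the same indices. A standalone sketch of that pairing, using a toy selection sort over plain arrays (names such as sortslice_demo and demo_swap are invented for the demo):

#include <stdio.h>

typedef struct {
    int *keys;
    const char **values;   /* may be NULL: keys are also the values */
} sortslice_demo;

static void demo_swap(sortslice_demo *s, int i, int j)
{
    int tk = s->keys[i];
    s->keys[i] = s->keys[j];
    s->keys[j] = tk;
    if (s->values != NULL) {           /* move values in lock step */
        const char *tv = s->values[i];
        s->values[i] = s->values[j];
        s->values[j] = tv;
    }
}

int main(void)
{
    int keys[] = { 3, 1, 2 };
    const char *values[] = { "c", "a", "b" };
    sortslice_demo s = { keys, values };

    for (int i = 0; i < 3; i++) {      /* tiny selection sort on keys */
        int min = i;
        for (int j = i + 1; j < 3; j++) {
            if (s.keys[j] < s.keys[min]) {
                min = j;
            }
        }
        demo_swap(&s, i, min);
    }
    for (int i = 0; i < 3; i++) {
        printf("%d=%s ", keys[i], values[i]);   /* 1=a 2=b 3=c */
    }
    printf("\n");
    return 0;
}
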
1658
1659
/* Comparison function: ms->key_compare, which is set at run-time in
1660
 * listsort_impl to optimize for various special cases.
1661
 * Returns -1 on error, 1 if x < y, 0 if x >= y.
1662
 */
1663
1664
16.3M
#define ISLT(X, Y) (*(ms->key_compare))(X, Y, ms)
1665
1666
/* Compare X to Y via "<".  Goto "fail" if the comparison raises an
1667
   error.  Else "k" is set to true iff X<Y, and an "if (k)" block is
1668
   started.  It makes more sense in context <wink>.  X and Y are PyObject*s.
1669
*/
1670
15.6M
#define IFLT(X, Y) if ((k = ISLT(X, Y)) < 0) goto fail;  \
1671
15.6M
           if (k)
1672
1673
/* The maximum number of entries in a MergeState's pending-runs stack.
1674
 * For a list with n elements, this needs at most floor(log2(n)) + 1 entries
1675
 * even if we didn't force runs to a minimal length.  So the number of bits
1676
 * in a Py_ssize_t is plenty large enough for all cases.
1677
 */
1678
#define MAX_MERGE_PENDING (SIZEOF_SIZE_T * 8)
1679
1680
/* When we get into galloping mode, we stay there until both runs win less
1681
 * often than MIN_GALLOP consecutive times.  See listsort.txt for more info.
1682
 */
1683
1.04M
#define MIN_GALLOP 7
1684
1685
/* Avoid malloc for small temp arrays. */
1686
1.96M
#define MERGESTATE_TEMP_SIZE 256
1687
1688
/* The largest value of minrun. This must be a power of 2, and >= 1 */
1689
834k
#define MAX_MINRUN 64
1690
#if ((MAX_MINRUN) < 1) || ((MAX_MINRUN) & ((MAX_MINRUN) - 1))
1691
#error "MAX_MINRUN must be a power of 2, and >= 1"
1692
#endif
1693
1694
/* One MergeState exists on the stack per invocation of mergesort.  It's just
1695
 * a convenient way to pass state around among the helper functions.
1696
 */
1697
struct s_slice {
1698
    sortslice base;
1699
    Py_ssize_t len;   /* length of run */
1700
    int power; /* node "level" for powersort merge strategy */
1701
};
1702
1703
typedef struct s_MergeState MergeState;
1704
struct s_MergeState {
1705
    /* This controls when we get *into* galloping mode.  It's initialized
1706
     * to MIN_GALLOP.  merge_lo and merge_hi tend to nudge it higher for
1707
     * random data, and lower for highly structured data.
1708
     */
1709
    Py_ssize_t min_gallop;
1710
1711
    Py_ssize_t listlen;     /* len(input_list) - read only */
1712
    PyObject **basekeys;    /* base address of keys array - read only */
1713
1714
    /* 'a' is temp storage to help with merges.  It contains room for
1715
     * alloced entries.
1716
     */
1717
    sortslice a;        /* may point to temparray below */
1718
    Py_ssize_t alloced;
1719
1720
    /* A stack of n pending runs yet to be merged.  Run #i starts at
1721
     * address base[i] and extends for len[i] elements.  It's always
1722
     * true (so long as the indices are in bounds) that
1723
     *
1724
     *     pending[i].base + pending[i].len == pending[i+1].base
1725
     *
1726
     * so we could cut the storage for this, but it's a minor amount,
1727
     * and keeping all the info explicit simplifies the code.
1728
     */
1729
    int n;
1730
    struct s_slice pending[MAX_MERGE_PENDING];
1731
1732
    /* 'a' points to this when possible, rather than muck with malloc. */
1733
    PyObject *temparray[MERGESTATE_TEMP_SIZE];
1734
1735
    /* This is the function we will use to compare two keys,
1736
     * even when none of our special cases apply and we have to use
1737
     * safe_object_compare. */
1738
    int (*key_compare)(PyObject *, PyObject *, MergeState *);
1739
1740
    /* This function is used by unsafe_object_compare to optimize comparisons
1741
     * when we know our list is type-homogeneous but we can't assume anything else.
1742
     * In the pre-sort check it is set equal to Py_TYPE(key)->tp_richcompare */
1743
    PyObject *(*key_richcompare)(PyObject *, PyObject *, int);
1744
1745
    /* This function is used by unsafe_tuple_compare to compare the first elements
1746
     * of tuples. It may be set to safe_object_compare, but the idea is that hopefully
1747
     * we can assume more, and use one of the special-case compares. */
1748
    int (*tuple_elem_compare)(PyObject *, PyObject *, MergeState *);
1749
1750
    /* Variables used for minrun computation. The "ideal" minrun length is
1751
     * the infinite precision listlen / 2**e. See listsort.txt.
1752
     */
1753
     Py_ssize_t mr_current, mr_e, mr_mask;
1754
};
1755
1756
/* binarysort is the best method for sorting small arrays: it does few
1757
   compares, but can do data movement quadratic in the number of elements.
1758
   ss->keys is viewed as an array of n keys, a[:n]. a[:ok] is already sorted.
1759
   Pass ok = 0 (or 1) if you don't know.
1760
   It's sorted in-place, by a stable binary insertion sort. If ss->values
1761
   isn't NULL, it's permuted in lockstep with ss->keys.
1762
   On entry, must have n >= 1, and 0 <= ok <= n <= MAX_MINRUN.
1763
   Return -1 if comparison raises an exception, else 0.
1764
   Even in case of error, the output slice will be some permutation of
1765
   the input (nothing is lost or duplicated).
1766
*/
1767
static int
1768
binarysort(MergeState *ms, const sortslice *ss, Py_ssize_t n, Py_ssize_t ok)
1769
132k
{
1770
132k
    Py_ssize_t k; /* for IFLT macro expansion */
1771
132k
    PyObject ** const a = ss->keys;
1772
132k
    PyObject ** const v = ss->values;
1773
132k
    const bool has_values = v != NULL;
1774
132k
    PyObject *pivot;
1775
132k
    Py_ssize_t M;
1776
1777
132k
    assert(0 <= ok && ok <= n && 1 <= n && n <= MAX_MINRUN);
1778
    /* assert a[:ok] is sorted */
1779
132k
    if (! ok)
1780
0
        ++ok;
1781
    /* Regular insertion sort has average- and worst-case O(n**2) cost
1782
       for both # of comparisons and number of bytes moved. But its branches
1783
       are highly predictable, and it loves sorted input (n-1 compares and no
1784
       data movement). This is significant in cases like sortperf.py's %sort,
1785
       where an out-of-order element near the start of a run is moved into
1786
       place slowly but then the remaining elements up to length minrun are
1787
       generally at worst one slot away from their correct position (so only
1788
       need 1 or 2 compares to resolve). If comparisons are very fast (such
1789
       as for a list of Python floats), the simple inner loop leaves it
1790
       very competitive with binary insertion, despite that it does
1791
       significantly more compares overall on random data.
1792
1793
       Binary insertion sort has worst, average, and best case O(n log n)
1794
       cost for # of comparisons, but worst and average case O(n**2) cost
1795
       for data movement. The more expensive the comparisons, the more important
1796
       the comparison advantage. But its branches are less predictable the
1797
       more "randomish" the data, and that's so significant its worst case
1798
       in real life is random input rather than reverse-ordered (which does
1799
       about twice as much data movement as random input does).
1800
1801
       Note that the number of bytes moved doesn't seem to matter. MAX_MINRUN
1802
       of 64 is so small that the key and value pointers all fit in a corner
1803
       of L1 cache, and moving things around in that is very fast. */
1804
#if 0 // ordinary insertion sort.
1805
    PyObject * vpivot = NULL;
1806
    for (; ok < n; ++ok) {
1807
        pivot = a[ok];
1808
        if (has_values)
1809
            vpivot = v[ok];
1810
        for (M = ok - 1; M >= 0; --M) {
1811
            k = ISLT(pivot, a[M]);
1812
            if (k < 0) {
1813
                a[M + 1] = pivot;
1814
                if (has_values)
1815
                    v[M + 1] = vpivot;
1816
                goto fail;
1817
            }
1818
            else if (k) {
1819
                a[M + 1] = a[M];
1820
                if (has_values)
1821
                    v[M + 1] = v[M];
1822
            }
1823
            else
1824
                break;
1825
        }
1826
        a[M + 1] = pivot;
1827
        if (has_values)
1828
            v[M + 1] = vpivot;
1829
    }
1830
#else // binary insertion sort
1831
132k
    Py_ssize_t L, R;
1832
2.35M
    for (; ok < n; ++ok) {
1833
        /* set L to where a[ok] belongs */
1834
2.22M
        L = 0;
1835
2.22M
        R = ok;
1836
2.22M
        pivot = a[ok];
1837
        /* Slice invariants. vacuously true at the start:
1838
         * all a[0:L]  <= pivot
1839
         * all a[L:R]     unknown
1840
         * all a[R:ok]  > pivot
1841
         */
1842
2.22M
        assert(L < R);
1843
9.44M
        do {
1844
            /* don't do silly ;-) things to prevent overflow when finding
1845
               the midpoint; L and R are very far from filling a Py_ssize_t */
1846
9.44M
            M = (L + R) >> 1;
1847
9.44M
#if 1 // straightforward, but highly unpredictable branch on random data
1848
9.44M
            IFLT(pivot, a[M])
1849
3.90M
                R = M;
1850
5.53M
            else
1851
5.53M
                L = M + 1;
1852
#else
1853
            /* Try to get compiler to generate conditional move instructions
1854
               instead. Works fine, but leaving it disabled for now because
1855
               it's not yielding consistently faster sorts. Needs more
1856
               investigation. More computation in the inner loop adds its own
1857
               costs, which can be significant when compares are fast. */
1858
            k = ISLT(pivot, a[M]);
1859
            if (k < 0)
1860
                goto fail;
1861
            Py_ssize_t Mp1 = M + 1;
1862
            R = k ? M : R;
1863
            L = k ? L : Mp1;
1864
#endif
1865
9.44M
        } while (L < R);
1866
2.22M
        assert(L == R);
1867
        /* a[:L] holds all elements from a[:ok] <= pivot now, so pivot belongs
1868
           at index L. Slide a[L:ok] to the right a slot to make room for it.
1869
           Caution: using memmove is much slower under MSVC 5; we're not
1870
           usually moving many slots. Years later: under Visual Studio 2022,
1871
           memmove seems just slightly slower than doing it "by hand". */
1872
16.4M
        for (M = ok; M > L; --M)
1873
14.2M
            a[M] = a[M - 1];
1874
2.22M
        a[L] = pivot;
1875
2.22M
        if (has_values) {
1876
1.61M
            pivot = v[ok];
1877
8.40M
            for (M = ok; M > L; --M)
1878
6.78M
                v[M] = v[M - 1];
1879
1.61M
            v[L] = pivot;
1880
1.61M
        }
1881
2.22M
    }
1882
132k
#endif // pick binary or regular insertion sort
1883
132k
    return 0;
1884
1885
0
 fail:
1886
0
    return -1;
1887
132k
}
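A stripped-down, self-contained sketch of the same binary insertion sort on plain ints (no key/value split, no error path), to make the search-then-slide structure easier to see; it is an illustration, not the function above:

#include <assert.h>
#include <stddef.h>

/* a[:ok] is already sorted; extend the sorted prefix to a[:n]. */
static void
demo_binarysort(int *a, size_t n, size_t ok)
{
    if (ok == 0)
        ok = 1;
    for (; ok < n; ++ok) {
        int pivot = a[ok];
        size_t L = 0, R = ok;
        while (L < R) {                 /* invariant: a[:L] <= pivot, a[R:ok] > pivot */
            size_t M = L + (R - L) / 2;
            if (pivot < a[M])
                R = M;                  /* equal keys stay right: stable */
            else
                L = M + 1;
        }
        for (size_t M = ok; M > L; --M) /* slide a[L:ok] right one slot */
            a[M] = a[M - 1];
        a[L] = pivot;
    }
}

int main(void)
{
    int a[] = {3, 1, 2, 2, 0};
    demo_binarysort(a, 5, 1);
    for (size_t i = 1; i < 5; i++)
        assert(a[i - 1] <= a[i]);
    return 0;
}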
1888
1889
static void
1890
sortslice_reverse(sortslice *s, Py_ssize_t n)
1891
74.1k
{
1892
74.1k
    reverse_slice(s->keys, &s->keys[n]);
1893
74.1k
    if (s->values != NULL)
1894
44.9k
        reverse_slice(s->values, &s->values[n]);
1895
74.1k
}
1896
1897
/*
1898
Return the length of the run beginning at slo->keys, spanning no more than
1899
nremaining elements. The run beginning there may be ascending or descending,
1900
but the function permutes it in place, if needed, so that it's always ascending
1901
upon return.
1902
1903
Returns -1 in case of error.
1904
*/
1905
static Py_ssize_t
1906
count_run(MergeState *ms, sortslice *slo, Py_ssize_t nremaining)
1907
293k
{
1908
293k
    Py_ssize_t k; /* used by IFLT macro expansion */
1909
293k
    Py_ssize_t n;
1910
293k
    PyObject ** const lo = slo->keys;
1911
1912
    /* In general, as things go on we've established that the slice starts
1913
       with a monotone run of n elements, starting at lo. */
1914
1915
    /* We're n elements into the slice, and the most recent neq+1 elements are
1916
     * all equal. This reverses them in-place, and resets neq for reuse.
1917
     */
1918
293k
#define REVERSE_LAST_NEQ                        \
1919
293k
    if (neq) {                                  \
1920
6.10k
        sortslice slice = *slo;                 \
1921
6.10k
        ++neq;                                  \
1922
6.10k
        sortslice_advance(&slice, n - neq);     \
1923
6.10k
        sortslice_reverse(&slice, neq);         \
1924
6.10k
        neq = 0;                                \
1925
6.10k
    }
1926
1927
    /* Sticking to only __lt__ compares is confusing and error-prone. But in
1928
     * this routine, almost all uses of IFLT can be captured by tiny macros
1929
     * giving mnemonic names to the intent. Note that inline functions don't
1930
     * work for this (IFLT expands to code including `goto fail`).
1931
     */
1932
293k
#define IF_NEXT_LARGER  IFLT(lo[n-1], lo[n])
1933
3.67M
#define IF_NEXT_SMALLER IFLT(lo[n], lo[n-1])
1934
1935
293k
    assert(nremaining);
1936
    /* try ascending run first */
1937
3.23M
    for (n = 1; n < nremaining; ++n) {
1938
3.12M
        IF_NEXT_SMALLER
1939
182k
            break;
1940
3.12M
    }
1941
293k
    if (n == nremaining)
1942
111k
        return n;
1943
    /* lo[n] is strictly less */
1944
    /* If n is 1 now, then the first compare established it's a descending
1945
     * run, so fall through to the descending case. But if n > 1, there are
1946
     * n elements in an ascending run terminated by the strictly less lo[n].
1947
     * If the first key < lo[n-1], *somewhere* along the way the sequence
1948
     * increased, so we're done (there is no descending run).
1949
     * Else first key >= lo[n-1], which implies that the entire ascending run
1950
     * consists of equal elements. In that case, this is a descending run,
1951
     * and we reverse the all-equal prefix in-place.
1952
     */
1953
182k
    if (n > 1) {
1954
124k
        IFLT(lo[0], lo[n-1])
1955
119k
            return n;
1956
5.00k
        sortslice_reverse(slo, n);
1957
5.00k
    }
1958
63.0k
    ++n; /* in all cases it's been established that lo[n] has been resolved */
1959
1960
    /* Finish descending run. All-equal subruns are reversed in-place on the
1961
     * fly. Their original order will be restored at the end by the whole-slice
1962
     * reversal.
1963
     */
1964
63.0k
    Py_ssize_t neq = 0;
1965
101k
    for ( ; n < nremaining; ++n) {
1966
75.1k
        IF_NEXT_SMALLER {
1967
            /* This ends the most recent run of equal elements, but still in
1968
             * the "descending" direction.
1969
             */
1970
15.6k
            REVERSE_LAST_NEQ
1971
15.6k
        }
1972
59.5k
        else {
1973
59.5k
            IF_NEXT_LARGER /* descending run is over */
1974
36.7k
                break;
1975
22.7k
            else /* not x < y and not y < x implies x == y */
1976
22.7k
                ++neq;
1977
59.5k
        }
1978
75.1k
    }
1979
63.0k
    REVERSE_LAST_NEQ
1980
63.0k
    sortslice_reverse(slo, n); /* transform to ascending run */
1981
1982
    /* And after reversing, it's possible this can be extended by a
1983
     * naturally increasing suffix; e.g., [3, 2, 3, 4, 1] makes an
1984
     * ascending run from the first 4 elements.
1985
     */
1986
503k
    for ( ; n < nremaining; ++n) {
1987
474k
        IF_NEXT_SMALLER
1988
34.6k
            break;
1989
474k
    }
1990
1991
63.0k
    return n;
1992
0
fail:
1993
0
    return -1;
1994
1995
63.0k
#undef REVERSE_LAST_NEQ
1996
63.0k
#undef IF_NEXT_SMALLER
1997
63.0k
#undef IF_NEXT_LARGER
1998
63.0k
}
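A self-contained sketch of the run-counting step on plain ints. It uses the simpler strictly-descending rule from older timsort descriptions rather than the all-equal handling above, so it only approximates count_run():

#include <assert.h>
#include <stddef.h>

static void
demo_reverse(int *lo, int *hi)   /* reverse [lo, hi) in place */
{
    for (--hi; lo < hi; ++lo, --hi) {
        int t = *lo; *lo = *hi; *hi = t;
    }
}

/* Length of the natural run at a[0:n]; a descending prefix is reversed
 * so the run is always ascending on return.  "Descending" here means
 * strictly decreasing, which is what keeps the reversal stable. */
static size_t
demo_count_run(int *a, size_t n)
{
    size_t k = 1;
    if (k == n)
        return 1;
    if (a[1] < a[0]) {                      /* strictly descending prefix */
        for (k = 2; k < n && a[k] < a[k - 1]; ++k)
            ;
        demo_reverse(a, a + k);
    }
    else {                                  /* non-decreasing prefix */
        for (k = 2; k < n && a[k] >= a[k - 1]; ++k)
            ;
    }
    return k;
}

int main(void)
{
    int a[] = {3, 2, 3, 4, 1};
    size_t k = demo_count_run(a, 5);        /* [3, 2] reversed -> run of 2 */
    assert(k == 2 && a[0] == 2 && a[1] == 3);
    return 0;
}

The count_run() above goes further: after reversing the descending prefix it keeps scanning for a naturally increasing suffix, so on this input ([3, 2, 3, 4, 1], the example from its own comment) it reports a run of length 4.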
1999
2000
/*
2001
Locate the proper position of key in a sorted vector; if the vector contains
2002
an element equal to key, return the position immediately to the left of
2003
the leftmost equal element.  [gallop_right() does the same except returns
2004
the position to the right of the rightmost equal element (if any).]
2005
2006
"a" is a sorted vector with n elements, starting at a[0].  n must be > 0.
2007
2008
"hint" is an index at which to begin the search, 0 <= hint < n.  The closer
2009
hint is to the final result, the faster this runs.
2010
2011
The return value is the int k in 0..n such that
2012
2013
    a[k-1] < key <= a[k]
2014
2015
pretending that *(a-1) is minus infinity and a[n] is plus infinity.  IOW,
2016
key belongs at index k; or, IOW, the first k elements of a should precede
2017
key, and the last n-k should follow key.
2018
2019
Returns -1 on error.  See listsort.txt for info on the method.
2020
*/
2021
static Py_ssize_t
2022
gallop_left(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
2023
204k
{
2024
204k
    Py_ssize_t ofs;
2025
204k
    Py_ssize_t lastofs;
2026
204k
    Py_ssize_t k;
2027
2028
204k
    assert(key && a && n > 0 && hint >= 0 && hint < n);
2029
2030
204k
    a += hint;
2031
204k
    lastofs = 0;
2032
204k
    ofs = 1;
2033
204k
    IFLT(*a, key) {
2034
        /* a[hint] < key -- gallop right, until
2035
         * a[hint + lastofs] < key <= a[hint + ofs]
2036
         */
2037
112k
        const Py_ssize_t maxofs = n - hint;             /* &a[n-1] is highest */
2038
378k
        while (ofs < maxofs) {
2039
298k
            IFLT(a[ofs], key) {
2040
266k
                lastofs = ofs;
2041
266k
                assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
2042
266k
                ofs = (ofs << 1) + 1;
2043
266k
            }
2044
31.8k
            else                /* key <= a[hint + ofs] */
2045
31.8k
                break;
2046
298k
        }
2047
112k
        if (ofs > maxofs)
2048
34.6k
            ofs = maxofs;
2049
        /* Translate back to offsets relative to &a[0]. */
2050
112k
        lastofs += hint;
2051
112k
        ofs += hint;
2052
112k
    }
2053
91.7k
    else {
2054
        /* key <= a[hint] -- gallop left, until
2055
         * a[hint - ofs] < key <= a[hint - lastofs]
2056
         */
2057
91.7k
        const Py_ssize_t maxofs = hint + 1;             /* &a[0] is lowest */
2058
306k
        while (ofs < maxofs) {
2059
278k
            IFLT(*(a-ofs), key)
2060
63.8k
                break;
2061
            /* key <= a[hint - ofs] */
2062
214k
            lastofs = ofs;
2063
214k
            assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
2064
214k
            ofs = (ofs << 1) + 1;
2065
214k
        }
2066
91.7k
        if (ofs > maxofs)
2067
21.0k
            ofs = maxofs;
2068
        /* Translate back to positive offsets relative to &a[0]. */
2069
91.7k
        k = lastofs;
2070
91.7k
        lastofs = hint - ofs;
2071
91.7k
        ofs = hint - k;
2072
91.7k
    }
2073
204k
    a -= hint;
2074
2075
204k
    assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
2076
    /* Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
2077
     * right of lastofs but no farther right than ofs.  Do a binary
2078
     * search, with invariant a[lastofs-1] < key <= a[ofs].
2079
     */
2080
204k
    ++lastofs;
2081
614k
    while (lastofs < ofs) {
2082
409k
        Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
2083
2084
409k
        IFLT(a[m], key)
2085
211k
            lastofs = m+1;              /* a[m] < key */
2086
197k
        else
2087
197k
            ofs = m;                    /* key <= a[m] */
2088
409k
    }
2089
204k
    assert(lastofs == ofs);             /* so a[ofs-1] < key <= a[ofs] */
2090
204k
    return ofs;
2091
2092
0
fail:
2093
0
    return -1;
2094
204k
}
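A self-contained sketch of the same idea on plain ints with hint fixed at 0: gallop (exponential search) to bracket the answer, then binary-search inside the bracket. It omits the hint-relative offsets and the error path, so it is an illustration rather than the function above:

#include <assert.h>
#include <stddef.h>

/* Return k in 0..n with a[k-1] < key <= a[k] (leftmost insertion point),
 * assuming a[0:n] is sorted ascending. */
static size_t
demo_gallop_left(int key, const int *a, size_t n)
{
    size_t lastofs = 0, ofs = 1;
    while (ofs < n && a[ofs - 1] < key) {   /* keep galloping right */
        lastofs = ofs;
        ofs = (ofs << 1) + 1;               /* probe offsets 1, 3, 7, 15, ... */
    }
    if (ofs > n)
        ofs = n;
    /* Now the answer lies in [lastofs, ofs]; finish with binary search. */
    while (lastofs < ofs) {
        size_t m = lastofs + (ofs - lastofs) / 2;
        if (a[m] < key)
            lastofs = m + 1;
        else
            ofs = m;
    }
    return ofs;
}

int main(void)
{
    const int a[] = {1, 2, 2, 2, 3};
    assert(demo_gallop_left(2, a, 5) == 1); /* slot before the leftmost 2 */
    assert(demo_gallop_left(4, a, 5) == 5);
    return 0;
}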
2095
2096
/*
2097
Exactly like gallop_left(), except that if key already exists in a[0:n],
2098
finds the position immediately to the right of the rightmost equal value.
2099
2100
The return value is the int k in 0..n such that
2101
2102
    a[k-1] <= key < a[k]
2103
2104
or -1 if error.
2105
2106
The code duplication is massive, but because we stick to "<" comparisons the
2107
two routines differ enough that they'd be much harder to follow if
2108
written as one routine with yet another "left or right?" flag.
2109
*/
2110
static Py_ssize_t
2111
gallop_right(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
2112
228k
{
2113
228k
    Py_ssize_t ofs;
2114
228k
    Py_ssize_t lastofs;
2115
228k
    Py_ssize_t k;
2116
2117
228k
    assert(key && a && n > 0 && hint >= 0 && hint < n);
2118
2119
228k
    a += hint;
2120
228k
    lastofs = 0;
2121
228k
    ofs = 1;
2122
228k
    IFLT(key, *a) {
2123
        /* key < a[hint] -- gallop left, until
2124
         * a[hint - ofs] <= key < a[hint - lastofs]
2125
         */
2126
92.7k
        const Py_ssize_t maxofs = hint + 1;             /* &a[0] is lowest */
2127
224k
        while (ofs < maxofs) {
2128
152k
            IFLT(key, *(a-ofs)) {
2129
131k
                lastofs = ofs;
2130
131k
                assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
2131
131k
                ofs = (ofs << 1) + 1;
2132
131k
            }
2133
20.7k
            else                /* a[hint - ofs] <= key */
2134
20.7k
                break;
2135
152k
        }
2136
92.7k
        if (ofs > maxofs)
2137
14.2k
            ofs = maxofs;
2138
        /* Translate back to positive offsets relative to &a[0]. */
2139
92.7k
        k = lastofs;
2140
92.7k
        lastofs = hint - ofs;
2141
92.7k
        ofs = hint - k;
2142
92.7k
    }
2143
135k
    else {
2144
        /* a[hint] <= key -- gallop right, until
2145
         * a[hint + lastofs] <= key < a[hint + ofs]
2146
        */
2147
135k
        const Py_ssize_t maxofs = n - hint;             /* &a[n-1] is highest */
2148
461k
        while (ofs < maxofs) {
2149
400k
            IFLT(key, a[ofs])
2150
73.8k
                break;
2151
            /* a[hint + ofs] <= key */
2152
326k
            lastofs = ofs;
2153
326k
            assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
2154
326k
            ofs = (ofs << 1) + 1;
2155
326k
        }
2156
135k
        if (ofs > maxofs)
2157
35.1k
            ofs = maxofs;
2158
        /* Translate back to offsets relative to &a[0]. */
2159
135k
        lastofs += hint;
2160
135k
        ofs += hint;
2161
135k
    }
2162
228k
    a -= hint;
2163
2164
228k
    assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
2165
    /* Now a[lastofs] <= key < a[ofs], so key belongs somewhere to the
2166
     * right of lastofs but no farther right than ofs.  Do a binary
2167
     * search, with invariant a[lastofs-1] <= key < a[ofs].
2168
     */
2169
228k
    ++lastofs;
2170
619k
    while (lastofs < ofs) {
2171
391k
        Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
2172
2173
391k
        IFLT(key, a[m])
2174
203k
            ofs = m;                    /* key < a[m] */
2175
187k
        else
2176
187k
            lastofs = m+1;              /* a[m] <= key */
2177
391k
    }
2178
228k
    assert(lastofs == ofs);             /* so a[ofs-1] <= key < a[ofs] */
2179
228k
    return ofs;
2180
2181
0
fail:
2182
0
    return -1;
2183
228k
}
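A concrete contrast, using the same toy array as the sketch above (values assumed for illustration): with a = [1, 2, 2, 2, 3], key = 2, n = 5, gallop_left() returns 1 (the slot just before the leftmost 2) while gallop_right() returns 4 (the slot just after the rightmost 2); for key = 4 both return 5. This left/right asymmetry is what keeps the merges stable: elements of the left run stay ahead of equal elements of the right run.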
2184
2185
/* Conceptually a MergeState's constructor. */
2186
static void
2187
merge_init(MergeState *ms, Py_ssize_t list_size, int has_keyfunc,
2188
           sortslice *lo)
2189
805k
{
2190
805k
    assert(ms != NULL);
2191
805k
    if (has_keyfunc) {
2192
        /* The temporary space for merging will need at most half the list
2193
         * size rounded up.  Use the minimum possible space so we can use the
2194
         * rest of temparray for other things.  In particular, if there is
2195
         * enough extra space, listsort() will use it to store the keys.
2196
         */
2197
578k
        ms->alloced = (list_size + 1) / 2;
2198
2199
        /* ms->alloced describes how many keys will be stored at
2200
           ms->temparray, but we also need to store the values.  Hence,
2201
           ms->alloced is capped at half of MERGESTATE_TEMP_SIZE. */
2202
578k
        if (MERGESTATE_TEMP_SIZE / 2 < ms->alloced)
2203
2.45k
            ms->alloced = MERGESTATE_TEMP_SIZE / 2;
2204
578k
        ms->a.values = &ms->temparray[ms->alloced];
2205
578k
    }
2206
227k
    else {
2207
227k
        ms->alloced = MERGESTATE_TEMP_SIZE;
2208
227k
        ms->a.values = NULL;
2209
227k
    }
2210
805k
    ms->a.keys = ms->temparray;
2211
805k
    ms->n = 0;
2212
805k
    ms->min_gallop = MIN_GALLOP;
2213
805k
    ms->listlen = list_size;
2214
805k
    ms->basekeys = lo->keys;
2215
2216
    /* State for generating minrun values. See listsort.txt. */
2217
805k
    ms->mr_e = 0;
2218
834k
    while (list_size >> ms->mr_e >= MAX_MINRUN) {
2219
29.5k
        ++ms->mr_e;
2220
29.5k
    }
2221
805k
    ms->mr_mask = (1 << ms->mr_e) - 1;
2222
805k
    ms->mr_current = 0;
2223
805k
}
2224
2225
/* Free all the temp memory owned by the MergeState.  This must be called
2226
 * when you're done with a MergeState, and may be called before then if
2227
 * you want to free the temp memory early.
2228
 */
2229
static void
2230
merge_freemem(MergeState *ms)
2231
810k
{
2232
810k
    assert(ms != NULL);
2233
810k
    if (ms->a.keys != ms->temparray) {
2234
5.45k
        PyMem_Free(ms->a.keys);
2235
5.45k
        ms->a.keys = NULL;
2236
5.45k
    }
2237
810k
}
2238
2239
/* Ensure enough temp memory for 'need' array slots is available.
2240
 * Returns 0 on success and -1 if the memory can't be gotten.
2241
 */
2242
static int
2243
merge_getmem(MergeState *ms, Py_ssize_t need)
2244
5.45k
{
2245
5.45k
    int multiplier;
2246
2247
5.45k
    assert(ms != NULL);
2248
5.45k
    if (need <= ms->alloced)
2249
0
        return 0;
2250
2251
5.45k
    multiplier = ms->a.values != NULL ? 2 : 1;
2252
2253
    /* Don't realloc!  That can cost cycles to copy the old data, but
2254
     * we don't care what's in the block.
2255
     */
2256
5.45k
    merge_freemem(ms);
2257
5.45k
    if ((size_t)need > PY_SSIZE_T_MAX / sizeof(PyObject *) / multiplier) {
2258
0
        PyErr_NoMemory();
2259
0
        return -1;
2260
0
    }
2261
5.45k
    ms->a.keys = (PyObject **)PyMem_Malloc(multiplier * need
2262
5.45k
                                          * sizeof(PyObject *));
2263
5.45k
    if (ms->a.keys != NULL) {
2264
5.45k
        ms->alloced = need;
2265
5.45k
        if (ms->a.values != NULL)
2266
4.90k
            ms->a.values = &ms->a.keys[need];
2267
5.45k
        return 0;
2268
5.45k
    }
2269
0
    PyErr_NoMemory();
2270
0
    return -1;
2271
5.45k
}
2272
78.8k
#define MERGE_GETMEM(MS, NEED) ((NEED) <= (MS)->alloced ? 0 :   \
2273
78.8k
                                merge_getmem(MS, NEED))
2274
2275
/* Merge the na elements starting at ssa with the nb elements starting at
2276
 * ssb.keys = ssa.keys + na in a stable way, in-place.  na and nb must be > 0.
2277
 * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
2278
 * should have na <= nb.  See listsort.txt for more info.  Return 0 if
2279
 * successful, -1 if error.
2280
 */
2281
static Py_ssize_t
2282
merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na,
2283
         sortslice ssb, Py_ssize_t nb)
2284
51.4k
{
2285
51.4k
    Py_ssize_t k;
2286
51.4k
    sortslice dest;
2287
51.4k
    int result = -1;            /* guilty until proved innocent */
2288
51.4k
    Py_ssize_t min_gallop;
2289
2290
51.4k
    assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
2291
51.4k
    assert(ssa.keys + na == ssb.keys);
2292
51.4k
    if (MERGE_GETMEM(ms, na) < 0)
2293
0
        return -1;
2294
51.4k
    sortslice_memcpy(&ms->a, 0, &ssa, 0, na);
2295
51.4k
    dest = ssa;
2296
51.4k
    ssa = ms->a;
2297
2298
51.4k
    sortslice_copy_incr(&dest, &ssb);
2299
51.4k
    --nb;
2300
51.4k
    if (nb == 0)
2301
3.59k
        goto Succeed;
2302
47.8k
    if (na == 1)
2303
8.55k
        goto CopyB;
2304
2305
39.3k
    min_gallop = ms->min_gallop;
2306
58.1k
    for (;;) {
2307
58.1k
        Py_ssize_t acount = 0;          /* # of times A won in a row */
2308
58.1k
        Py_ssize_t bcount = 0;          /* # of times B won in a row */
2309
2310
        /* Do the straightforward thing until (if ever) one run
2311
         * appears to win consistently.
2312
         */
2313
387k
        for (;;) {
2314
387k
            assert(na > 1 && nb > 0);
2315
387k
            k = ISLT(ssb.keys[0], ssa.keys[0]);
2316
387k
            if (k) {
2317
210k
                if (k < 0)
2318
0
                    goto Fail;
2319
210k
                sortslice_copy_incr(&dest, &ssb);
2320
210k
                ++bcount;
2321
210k
                acount = 0;
2322
210k
                --nb;
2323
210k
                if (nb == 0)
2324
3.93k
                    goto Succeed;
2325
206k
                if (bcount >= min_gallop)
2326
27.9k
                    break;
2327
206k
            }
2328
177k
            else {
2329
177k
                sortslice_copy_incr(&dest, &ssa);
2330
177k
                ++acount;
2331
177k
                bcount = 0;
2332
177k
                --na;
2333
177k
                if (na == 1)
2334
2.70k
                    goto CopyB;
2335
174k
                if (acount >= min_gallop)
2336
23.5k
                    break;
2337
174k
            }
2338
387k
        }
2339
2340
        /* One run is winning so consistently that galloping may
2341
         * be a huge win.  So try that, and continue galloping until
2342
         * (if ever) neither run appears to be winning consistently
2343
         * anymore.
2344
         */
2345
51.5k
        ++min_gallop;
2346
89.4k
        do {
2347
89.4k
            assert(na > 1 && nb > 0);
2348
89.4k
            min_gallop -= min_gallop > 1;
2349
89.4k
            ms->min_gallop = min_gallop;
2350
89.4k
            k = gallop_right(ms, ssb.keys[0], ssa.keys, na, 0);
2351
89.4k
            acount = k;
2352
89.4k
            if (k) {
2353
51.7k
                if (k < 0)
2354
0
                    goto Fail;
2355
51.7k
                sortslice_memcpy(&dest, 0, &ssa, 0, k);
2356
51.7k
                sortslice_advance(&dest, k);
2357
51.7k
                sortslice_advance(&ssa, k);
2358
51.7k
                na -= k;
2359
51.7k
                if (na == 1)
2360
8.82k
                    goto CopyB;
2361
                /* na==0 is impossible now if the comparison
2362
                 * function is consistent, but we can't assume
2363
                 * that it is.
2364
                 */
2365
42.8k
                if (na == 0)
2366
0
                    goto Succeed;
2367
42.8k
            }
2368
80.6k
            sortslice_copy_incr(&dest, &ssb);
2369
80.6k
            --nb;
2370
80.6k
            if (nb == 0)
2371
2.23k
                goto Succeed;
2372
2373
78.4k
            k = gallop_left(ms, ssa.keys[0], ssb.keys, nb, 0);
2374
78.4k
            bcount = k;
2375
78.4k
            if (k) {
2376
72.4k
                if (k < 0)
2377
0
                    goto Fail;
2378
72.4k
                sortslice_memmove(&dest, 0, &ssb, 0, k);
2379
72.4k
                sortslice_advance(&dest, k);
2380
72.4k
                sortslice_advance(&ssb, k);
2381
72.4k
                nb -= k;
2382
72.4k
                if (nb == 0)
2383
17.7k
                    goto Succeed;
2384
72.4k
            }
2385
60.6k
            sortslice_copy_incr(&dest, &ssa);
2386
60.6k
            --na;
2387
60.6k
            if (na == 1)
2388
3.89k
                goto CopyB;
2389
60.6k
        } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
2390
18.8k
        ++min_gallop;           /* penalize it for leaving galloping mode */
2391
18.8k
        ms->min_gallop = min_gallop;
2392
18.8k
    }
2393
27.4k
Succeed:
2394
27.4k
    result = 0;
2395
27.4k
Fail:
2396
27.4k
    if (na)
2397
27.4k
        sortslice_memcpy(&dest, 0, &ssa, 0, na);
2398
27.4k
    return result;
2399
23.9k
CopyB:
2400
23.9k
    assert(na == 1 && nb > 0);
2401
    /* The last element of ssa belongs at the end of the merge. */
2402
23.9k
    sortslice_memmove(&dest, 0, &ssb, 0, nb);
2403
23.9k
    sortslice_copy(&dest, nb, &ssa, 0);
2404
23.9k
    return 0;
2405
27.4k
}
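Stripped of galloping, the min_gallop bookkeeping, and the key/value split, the core of merge_lo() is an ordinary forward two-way merge of two adjacent runs, with the smaller (left) run copied to temp storage first. A self-contained sketch on ints; merge_hi() below is the mirror image, merging backward from the right end:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Merge a[0:na] and a[na:na+nb] (each sorted) in place.  Returns 0,
 * or -1 if the temp allocation fails. */
static int
demo_merge_lo(int *a, size_t na, size_t nb)
{
    int *tmp = malloc(na * sizeof(int));
    if (tmp == NULL)
        return -1;
    memcpy(tmp, a, na * sizeof(int));

    size_t i = 0, j = na, k = 0;         /* temp index, right-run index, dest */
    while (i < na && j < na + nb) {
        if (a[j] < tmp[i])               /* strict <: left run wins ties (stable) */
            a[k++] = a[j++];
        else
            a[k++] = tmp[i++];
    }
    while (i < na)                       /* whatever is left of the temp copy */
        a[k++] = tmp[i++];
    free(tmp);
    return 0;
}

int main(void)
{
    int a[] = {1, 4, 7, 2, 3, 9};
    assert(demo_merge_lo(a, 3, 3) == 0);
    for (size_t i = 1; i < 6; i++)
        assert(a[i - 1] <= a[i]);
    return 0;
}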
2406
2407
/* Merge the na elements starting at ssa with the nb elements starting at
2408
 * ssb.keys = ssa.keys + na in a stable way, in-place.  na and nb must be > 0.
2409
 * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
2410
 * should have na >= nb.  See listsort.txt for more info.  Return 0 if
2411
 * successful, -1 if error.
2412
 */
2413
static Py_ssize_t
2414
merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na,
2415
         sortslice ssb, Py_ssize_t nb)
2416
27.3k
{
2417
27.3k
    Py_ssize_t k;
2418
27.3k
    sortslice dest, basea, baseb;
2419
27.3k
    int result = -1;            /* guilty until proved innocent */
2420
27.3k
    Py_ssize_t min_gallop;
2421
2422
27.3k
    assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
2423
27.3k
    assert(ssa.keys + na == ssb.keys);
2424
27.3k
    if (MERGE_GETMEM(ms, nb) < 0)
2425
0
        return -1;
2426
27.3k
    dest = ssb;
2427
27.3k
    sortslice_advance(&dest, nb-1);
2428
27.3k
    sortslice_memcpy(&ms->a, 0, &ssb, 0, nb);
2429
27.3k
    basea = ssa;
2430
27.3k
    baseb = ms->a;
2431
27.3k
    ssb.keys = ms->a.keys + nb - 1;
2432
27.3k
    if (ssb.values != NULL)
2433
23.6k
        ssb.values = ms->a.values + nb - 1;
2434
27.3k
    sortslice_advance(&ssa, na - 1);
2435
2436
27.3k
    sortslice_copy_decr(&dest, &ssa);
2437
27.3k
    --na;
2438
27.3k
    if (na == 0)
2439
0
        goto Succeed;
2440
27.3k
    if (nb == 1)
2441
889
        goto CopyA;
2442
2443
26.4k
    min_gallop = ms->min_gallop;
2444
36.6k
    for (;;) {
2445
36.6k
        Py_ssize_t acount = 0;          /* # of times A won in a row */
2446
36.6k
        Py_ssize_t bcount = 0;          /* # of times B won in a row */
2447
2448
        /* Do the straightforward thing until (if ever) one run
2449
         * appears to win consistently.
2450
         */
2451
257k
        for (;;) {
2452
257k
            assert(na > 0 && nb > 1);
2453
257k
            k = ISLT(ssb.keys[0], ssa.keys[0]);
2454
257k
            if (k) {
2455
122k
                if (k < 0)
2456
0
                    goto Fail;
2457
122k
                sortslice_copy_decr(&dest, &ssa);
2458
122k
                ++acount;
2459
122k
                bcount = 0;
2460
122k
                --na;
2461
122k
                if (na == 0)
2462
700
                    goto Succeed;
2463
122k
                if (acount >= min_gallop)
2464
16.4k
                    break;
2465
122k
            }
2466
134k
            else {
2467
134k
                sortslice_copy_decr(&dest, &ssb);
2468
134k
                ++bcount;
2469
134k
                acount = 0;
2470
134k
                --nb;
2471
134k
                if (nb == 1)
2472
464
                    goto CopyA;
2473
134k
                if (bcount >= min_gallop)
2474
19.0k
                    break;
2475
134k
            }
2476
257k
        }
2477
2478
        /* One run is winning so consistently that galloping may
2479
         * be a huge win.  So try that, and continue galloping until
2480
         * (if ever) neither run appears to be winning consistently
2481
         * anymore.
2482
         */
2483
35.4k
        ++min_gallop;
2484
59.7k
        do {
2485
59.7k
            assert(na > 0 && nb > 1);
2486
59.7k
            min_gallop -= min_gallop > 1;
2487
59.7k
            ms->min_gallop = min_gallop;
2488
59.7k
            k = gallop_right(ms, ssb.keys[0], basea.keys, na, na-1);
2489
59.7k
            if (k < 0)
2490
0
                goto Fail;
2491
59.7k
            k = na - k;
2492
59.7k
            acount = k;
2493
59.7k
            if (k) {
2494
36.1k
                sortslice_advance(&dest, -k);
2495
36.1k
                sortslice_advance(&ssa, -k);
2496
36.1k
                sortslice_memmove(&dest, 1, &ssa, 1, k);
2497
36.1k
                na -= k;
2498
36.1k
                if (na == 0)
2499
11.7k
                    goto Succeed;
2500
36.1k
            }
2501
47.9k
            sortslice_copy_decr(&dest, &ssb);
2502
47.9k
            --nb;
2503
47.9k
            if (nb == 1)
2504
757
                goto CopyA;
2505
2506
47.2k
            k = gallop_left(ms, ssa.keys[0], baseb.keys, nb, nb-1);
2507
47.2k
            if (k < 0)
2508
0
                goto Fail;
2509
47.2k
            k = nb - k;
2510
47.2k
            bcount = k;
2511
47.2k
            if (k) {
2512
41.3k
                sortslice_advance(&dest, -k);
2513
41.3k
                sortslice_advance(&ssb, -k);
2514
41.3k
                sortslice_memcpy(&dest, 1, &ssb, 1, k);
2515
41.3k
                nb -= k;
2516
41.3k
                if (nb == 1)
2517
11.4k
                    goto CopyA;
2518
                /* nb==0 is impossible now if the comparison
2519
                 * function is consistent, but we can't assume
2520
                 * that it is.
2521
                 */
2522
29.8k
                if (nb == 0)
2523
0
                    goto Succeed;
2524
29.8k
            }
2525
35.7k
            sortslice_copy_decr(&dest, &ssa);
2526
35.7k
            --na;
2527
35.7k
            if (na == 0)
2528
1.35k
                goto Succeed;
2529
35.7k
        } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
2530
10.1k
        ++min_gallop;           /* penalize it for leaving galloping mode */
2531
10.1k
        ms->min_gallop = min_gallop;
2532
10.1k
    }
2533
13.7k
Succeed:
2534
13.7k
    result = 0;
2535
13.7k
Fail:
2536
13.7k
    if (nb)
2537
13.7k
        sortslice_memcpy(&dest, -(nb-1), &baseb, 0, nb);
2538
13.7k
    return result;
2539
13.5k
CopyA:
2540
13.5k
    assert(nb == 1 && na > 0);
2541
    /* The first element of ssb belongs at the front of the merge. */
2542
13.5k
    sortslice_memmove(&dest, 1-na, &ssa, 1-na, na);
2543
13.5k
    sortslice_advance(&dest, -na);
2544
13.5k
    sortslice_advance(&ssa, -na);
2545
13.5k
    sortslice_copy(&dest, 0, &ssb, 0);
2546
13.5k
    return 0;
2547
13.7k
}
2548
2549
/* Merge the two runs at stack indices i and i+1.
2550
 * Returns 0 on success, -1 on error.
2551
 */
2552
static Py_ssize_t
2553
merge_at(MergeState *ms, Py_ssize_t i)
2554
78.8k
{
2555
78.8k
    sortslice ssa, ssb;
2556
78.8k
    Py_ssize_t na, nb;
2557
78.8k
    Py_ssize_t k;
2558
2559
78.8k
    assert(ms != NULL);
2560
78.8k
    assert(ms->n >= 2);
2561
78.8k
    assert(i >= 0);
2562
78.8k
    assert(i == ms->n - 2 || i == ms->n - 3);
2563
2564
78.8k
    ssa = ms->pending[i].base;
2565
78.8k
    na = ms->pending[i].len;
2566
78.8k
    ssb = ms->pending[i+1].base;
2567
78.8k
    nb = ms->pending[i+1].len;
2568
78.8k
    assert(na > 0 && nb > 0);
2569
78.8k
    assert(ssa.keys + na == ssb.keys);
2570
2571
    /* Record the length of the combined runs; if i is the 3rd-last
2572
     * run now, also slide over the last run (which isn't involved
2573
     * in this merge).  The current run i+1 goes away in any case.
2574
     */
2575
78.8k
    ms->pending[i].len = na + nb;
2576
78.8k
    if (i == ms->n - 3)
2577
472
        ms->pending[i+1] = ms->pending[i+2];
2578
78.8k
    --ms->n;
2579
2580
    /* Where does b start in a?  Elements in a before that can be
2581
     * ignored (already in place).
2582
     */
2583
78.8k
    k = gallop_right(ms, *ssb.keys, ssa.keys, na, 0);
2584
78.8k
    if (k < 0)
2585
0
        return -1;
2586
78.8k
    sortslice_advance(&ssa, k);
2587
78.8k
    na -= k;
2588
78.8k
    if (na == 0)
2589
27
        return 0;
2590
2591
    /* Where does a end in b?  Elements in b after that can be
2592
     * ignored (already in place).
2593
     */
2594
78.8k
    nb = gallop_left(ms, ssa.keys[na-1], ssb.keys, nb, nb-1);
2595
78.8k
    if (nb <= 0)
2596
0
        return nb;
2597
2598
    /* Merge what remains of the runs, using a temp array with
2599
     * min(na, nb) elements.
2600
     */
2601
78.8k
    if (na <= nb)
2602
51.4k
        return merge_lo(ms, ssa, na, ssb, nb);
2603
27.3k
    else
2604
27.3k
        return merge_hi(ms, ssa, na, ssb, nb);
2605
78.8k
}
2606
2607
/* Two adjacent runs begin at index s1. The first run has length n1, and
2608
 * the second run (starting at index s1+n1) has length n2. The list has total
2609
 * length n.
2610
 * Compute the "power" of the first run. See listsort.txt for details.
2611
 */
2612
static int
2613
powerloop(Py_ssize_t s1, Py_ssize_t n1, Py_ssize_t n2, Py_ssize_t n)
2614
78.8k
{
2615
78.8k
    int result = 0;
2616
78.8k
    assert(s1 >= 0);
2617
78.8k
    assert(n1 > 0 && n2 > 0);
2618
78.8k
    assert(s1 + n1 + n2 <= n);
2619
    /* midpoints a and b:
2620
     * a = s1 + n1/2
2621
     * b = s1 + n1 + n2/2 = a + (n1 + n2)/2
2622
     *
2623
     * Those may not be integers, though, because of the "/2". So we work with
2624
     * 2*a and 2*b instead, which are necessarily integers. It makes no
2625
     * difference to the outcome, since the bits in the expansion of (2*i)/n
2626
     * are merely shifted one position from those of i/n.
2627
     */
2628
78.8k
    Py_ssize_t a = 2 * s1 + n1;  /* 2*a */
2629
78.8k
    Py_ssize_t b = a + n1 + n2;  /* 2*b */
2630
    /* Emulate a/n and b/n one bit at a time, until bits differ. */
2631
316k
    for (;;) {
2632
316k
        ++result;
2633
316k
        if (a >= n) {  /* both quotient bits are 1 */
2634
122k
            assert(b >= a);
2635
122k
            a -= n;
2636
122k
            b -= n;
2637
122k
        }
2638
194k
        else if (b >= n) {  /* a/n bit is 0, b/n bit is 1 */
2639
78.8k
            break;
2640
78.8k
        } /* else both quotient bits are 0 */
2641
316k
        assert(a < b && b < n);
2642
237k
        a <<= 1;
2643
237k
        b <<= 1;
2644
237k
    }
2645
78.8k
    return result;
2646
78.8k
}
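A worked instance (values chosen here purely for illustration): with n = 16, s1 = 0, n1 = n2 = 4, the conceptual midpoints are a = s1 + n1/2 = 2 and b = s1 + n1 + n2/2 = 6, so a/n = 0.125 = 0.001 in binary and b/n = 0.375 = 0.011. The fractions first differ at the second bit, and the loop above reproduces that: starting from 2*a = 4 and 2*b = 12, the first pass finds both below n (two zero bits; shift to 8 and 24), and on the second pass a < n <= b, so powerloop() returns 2.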
2647
2648
/* The next run has been identified, of length n2.
2649
 * If there's already a run on the stack, apply the "powersort" merge strategy:
2650
 * compute the topmost run's "power" (depth in a conceptual binary merge tree)
2651
 * and merge adjacent runs on the stack with greater power. See listsort.txt
2652
 * for more info.
2653
 *
2654
 * It's the caller's responsibility to push the new run on the stack when this
2655
 * returns.
2656
 *
2657
 * Returns 0 on success, -1 on error.
2658
 */
2659
static int
2660
found_new_run(MergeState *ms, Py_ssize_t n2)
2661
293k
{
2662
293k
    assert(ms);
2663
293k
    if (ms->n) {
2664
78.8k
        assert(ms->n > 0);
2665
78.8k
        struct s_slice *p = ms->pending;
2666
78.8k
        Py_ssize_t s1 = p[ms->n - 1].base.keys - ms->basekeys; /* start index */
2667
78.8k
        Py_ssize_t n1 = p[ms->n - 1].len;
2668
78.8k
        int power = powerloop(s1, n1, n2, ms->listlen);
2669
130k
        while (ms->n > 1 && p[ms->n - 2].power > power) {
2670
51.8k
            if (merge_at(ms, ms->n - 2) < 0)
2671
0
                return -1;
2672
51.8k
        }
2673
78.8k
        assert(ms->n < 2 || p[ms->n - 2].power < power);
2674
78.8k
        p[ms->n - 1].power = power;
2675
78.8k
    }
2676
293k
    return 0;
2677
293k
}
2678
2679
/* Regardless of invariants, merge all runs on the stack until only one
2680
 * remains.  This is used at the end of the mergesort.
2681
 *
2682
 * Returns 0 on success, -1 on error.
2683
 */
2684
static int
2685
merge_force_collapse(MergeState *ms)
2686
215k
{
2687
215k
    struct s_slice *p = ms->pending;
2688
2689
215k
    assert(ms);
2690
242k
    while (ms->n > 1) {
2691
27.0k
        Py_ssize_t n = ms->n - 2;
2692
27.0k
        if (n > 0 && p[n-1].len < p[n+1].len)
2693
472
            --n;
2694
27.0k
        if (merge_at(ms, n) < 0)
2695
0
            return -1;
2696
27.0k
    }
2697
215k
    return 0;
2698
215k
}
2699
2700
/* Return the next minrun value to use. See listsort.txt. */
2701
Py_LOCAL_INLINE(Py_ssize_t)
2702
minrun_next(MergeState *ms)
2703
293k
{
2704
293k
    ms->mr_current += ms->listlen;
2705
293k
    assert(ms->mr_current >= 0); /* no overflow */
2706
293k
    Py_ssize_t result = ms->mr_current >> ms->mr_e;
2707
293k
    ms->mr_current &= ms->mr_mask;
2708
293k
    return result;
2709
293k
}
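The three mr_* fields implement a fixed-point division: each call hands out roughly listlen / 2**mr_e elements and carries the remainder forward, so that forced run lengths sum exactly to listlen. A self-contained sketch with an assumed listlen of 165 (so mr_e becomes 2, since 165 >> 2 = 41 < MAX_MINRUN = 64) and the further assumption that every natural run is short, so exactly four forced runs cover the list:

#include <assert.h>
#include <stddef.h>

int main(void)
{
    ptrdiff_t listlen = 165;
    int e = 0;
    while (listlen >> e >= 64)      /* same rule as merge_init() */
        ++e;                        /* e == 2 for listlen == 165 */
    ptrdiff_t mask = ((ptrdiff_t)1 << e) - 1;

    ptrdiff_t current = 0, total = 0;
    ptrdiff_t runs[4];
    for (int i = 0; i < 4; i++) {   /* mirrors minrun_next() */
        current += listlen;
        runs[i] = current >> e;
        current &= mask;
        total += runs[i];
    }
    /* Hands out 41, 41, 41, 42: the remainder accumulates until it
     * rounds up, and the four minruns sum to exactly 165. */
    assert(runs[0] == 41 && runs[3] == 42 && total == 165);
    return 0;
}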
2710
2711
/* Here we define custom comparison functions to optimize for the cases one commonly
2712
 * encounters in practice: homogeneous lists, often of one of the basic types. */
2713
2714
/* The MergeState struct holds the comparison function and helper functions
2715
 * selected in the pre-sort check. */
2716
2717
/* These are the special case compare functions.
2718
 * ms->key_compare will always point to one of these: */
2719
2720
/* Heterogeneous compare: default, always safe to fall back on. */
2721
static int
2722
safe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
2723
0
{
2724
    /* No assumptions necessary! */
2725
0
    return PyObject_RichCompareBool(v, w, Py_LT);
2726
0
}
2727
2728
/* Homogeneous compare: safe for any two comparable objects of the same type.
2729
 * (ms->key_richcompare is set to ob_type->tp_richcompare in the
2730
 *  pre-sort check.)
2731
 */
2732
static int
2733
unsafe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
2734
9.62M
{
2735
9.62M
    PyObject *res_obj; int res;
2736
2737
    /* No assumptions, because we check first: */
2738
9.62M
    if (Py_TYPE(v)->tp_richcompare != ms->key_richcompare)
2739
0
        return PyObject_RichCompareBool(v, w, Py_LT);
2740
2741
9.62M
    assert(ms->key_richcompare != NULL);
2742
9.62M
    res_obj = (*(ms->key_richcompare))(v, w, Py_LT);
2743
2744
9.62M
    if (res_obj == Py_NotImplemented) {
2745
0
        Py_DECREF(res_obj);
2746
0
        return PyObject_RichCompareBool(v, w, Py_LT);
2747
0
    }
2748
9.62M
    if (res_obj == NULL)
2749
0
        return -1;
2750
2751
9.62M
    if (PyBool_Check(res_obj)) {
2752
9.62M
        res = (res_obj == Py_True);
2753
9.62M
    }
2754
0
    else {
2755
0
        res = PyObject_IsTrue(res_obj);
2756
0
    }
2757
9.62M
    Py_DECREF(res_obj);
2758
2759
    /* Note that we can't assert
2760
     *     res == PyObject_RichCompareBool(v, w, Py_LT);
2761
     * because of evil compare functions like this:
2762
     *     lambda a, b:  int(random.random() * 3) - 1)
2763
     * (which is actually in test_sort.py) */
2764
9.62M
    return res;
2765
9.62M
}
2766
2767
/* Latin string compare: safe for any two latin (one byte per char) strings. */
2768
static int
2769
unsafe_latin_compare(PyObject *v, PyObject *w, MergeState *ms)
2770
289k
{
2771
289k
    Py_ssize_t len;
2772
289k
    int res;
2773
2774
    /* Modified from Objects/unicodeobject.c:unicode_compare, assuming: */
2775
289k
    assert(Py_IS_TYPE(v, &PyUnicode_Type));
2776
289k
    assert(Py_IS_TYPE(w, &PyUnicode_Type));
2777
289k
    assert(PyUnicode_KIND(v) == PyUnicode_KIND(w));
2778
289k
    assert(PyUnicode_KIND(v) == PyUnicode_1BYTE_KIND);
2779
2780
289k
    len = Py_MIN(PyUnicode_GET_LENGTH(v), PyUnicode_GET_LENGTH(w));
2781
289k
    res = memcmp(PyUnicode_DATA(v), PyUnicode_DATA(w), len);
2782
2783
289k
    res = (res != 0 ?
2784
283k
           res < 0 :
2785
289k
           PyUnicode_GET_LENGTH(v) < PyUnicode_GET_LENGTH(w));
2786
2787
289k
    assert(res == PyObject_RichCompareBool(v, w, Py_LT));
2788
289k
    return res;
2789
289k
}
2790
2791
/* Bounded int compare: compare any two longs that fit in a single machine word. */
2792
static int
2793
unsafe_long_compare(PyObject *v, PyObject *w, MergeState *ms)
2794
6.39M
{
2795
6.39M
    PyLongObject *vl, *wl;
2796
6.39M
    intptr_t v0, w0;
2797
6.39M
    int res;
2798
2799
    /* Modified from Objects/longobject.c:long_compare, assuming: */
2800
6.39M
    assert(Py_IS_TYPE(v, &PyLong_Type));
2801
6.39M
    assert(Py_IS_TYPE(w, &PyLong_Type));
2802
6.39M
    assert(_PyLong_IsCompact((PyLongObject *)v));
2803
6.39M
    assert(_PyLong_IsCompact((PyLongObject *)w));
2804
2805
6.39M
    vl = (PyLongObject*)v;
2806
6.39M
    wl = (PyLongObject*)w;
2807
2808
6.39M
    v0 = _PyLong_CompactValue(vl);
2809
6.39M
    w0 = _PyLong_CompactValue(wl);
2810
2811
6.39M
    res = v0 < w0;
2812
6.39M
    assert(res == PyObject_RichCompareBool(v, w, Py_LT));
2813
6.39M
    return res;
2814
6.39M
}
2815
2816
/* Float compare: compare any two floats. */
2817
static int
2818
unsafe_float_compare(PyObject *v, PyObject *w, MergeState *ms)
2819
0
{
2820
0
    int res;
2821
2822
    /* Modified from Objects/floatobject.c:float_richcompare, assuming: */
2823
0
    assert(Py_IS_TYPE(v, &PyFloat_Type));
2824
0
    assert(Py_IS_TYPE(w, &PyFloat_Type));
2825
2826
0
    res = PyFloat_AS_DOUBLE(v) < PyFloat_AS_DOUBLE(w);
2827
0
    assert(res == PyObject_RichCompareBool(v, w, Py_LT));
2828
0
    return res;
2829
0
}
2830
2831
/* Tuple compare: compare *any* two tuples, using
2832
 * ms->tuple_elem_compare to compare the first elements, which is set
2833
 * using the same pre-sort check as we use for ms->key_compare,
2834
 * but run on the list [x[0] for x in L]. This allows us to optimize compares
2835
 * on two levels (as long as [x[0] for x in L] is type-homogeneous.) The idea is
2836
 * that most tuple compares don't involve x[1:]. */
2837
static int
2838
unsafe_tuple_compare(PyObject *v, PyObject *w, MergeState *ms)
2839
1.29k
{
2840
1.29k
    PyTupleObject *vt, *wt;
2841
1.29k
    Py_ssize_t i, vlen, wlen;
2842
1.29k
    int k;
2843
2844
    /* Modified from Objects/tupleobject.c:tuplerichcompare, assuming: */
2845
1.29k
    assert(Py_IS_TYPE(v, &PyTuple_Type));
2846
1.29k
    assert(Py_IS_TYPE(w, &PyTuple_Type));
2847
1.29k
    assert(Py_SIZE(v) > 0);
2848
1.29k
    assert(Py_SIZE(w) > 0);
2849
2850
1.29k
    vt = (PyTupleObject *)v;
2851
1.29k
    wt = (PyTupleObject *)w;
2852
2853
1.29k
    vlen = Py_SIZE(vt);
2854
1.29k
    wlen = Py_SIZE(wt);
2855
2856
1.30k
    for (i = 0; i < vlen && i < wlen; i++) {
2857
1.30k
        k = PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_EQ);
2858
1.30k
        if (k < 0)
2859
0
            return -1;
2860
1.30k
        if (!k)
2861
1.29k
            break;
2862
1.30k
    }
2863
2864
1.29k
    if (i >= vlen || i >= wlen)
2865
0
        return vlen < wlen;
2866
2867
1.29k
    if (i == 0)
2868
1.28k
        return ms->tuple_elem_compare(vt->ob_item[i], wt->ob_item[i], ms);
2869
12
    else
2870
12
        return PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_LT);
2871
1.29k
}
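For example (an input assumed here, not taken from the source): sorting keys like (3, 'c'), (1, 'a'), (2, 'b'), whose first elements are compact ints, sets tuple_elem_compare to unsafe_long_compare. Each compare first checks the leading elements for equality and, when they differ (the i == 0 case above), decides the order with tuple_elem_compare, i.e. the fast int path; only when the leading elements are equal does it fall back to PyObject_RichCompareBool on the later items, which is why most tuple compares never touch x[1:].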
2872
2873
/* An adaptive, stable, natural mergesort.  See listsort.txt.
2874
 * Returns Py_None on success, NULL on error.  Even in case of error, the
2875
 * list will be some permutation of its input state (nothing is lost or
2876
 * duplicated).
2877
 */
2878
/*[clinic input]
2879
@permit_long_docstring_body
2880
@critical_section
2881
list.sort
2882
2883
    *
2884
    key as keyfunc: object = None
2885
    reverse: bool = False
2886
2887
Sort the list in ascending order and return None.
2888
2889
The sort is in-place (i.e. the list itself is modified) and stable (i.e. the
2890
order of two equal elements is maintained).
2891
2892
If a key function is given, apply it once to each list item and sort them,
2893
ascending or descending, according to their function values.
2894
2895
The reverse flag can be set to sort in descending order.
2896
[clinic start generated code]*/
2897
2898
static PyObject *
2899
list_sort_impl(PyListObject *self, PyObject *keyfunc, int reverse)
2900
/*[clinic end generated code: output=57b9f9c5e23fbe42 input=e4f6b6069181ad7d]*/
2901
805k
{
2902
805k
    MergeState ms;
2903
805k
    Py_ssize_t nremaining;
2904
805k
    Py_ssize_t minrun;
2905
805k
    sortslice lo;
2906
805k
    Py_ssize_t saved_ob_size, saved_allocated;
2907
805k
    PyObject **saved_ob_item;
2908
805k
    PyObject **final_ob_item;
2909
805k
    PyObject *result = NULL;            /* guilty until proved innocent */
2910
805k
    Py_ssize_t i;
2911
805k
    PyObject **keys;
2912
2913
805k
    assert(self != NULL);
2914
805k
    assert(PyList_Check(self));
2915
805k
    if (keyfunc == Py_None)
2916
211k
        keyfunc = NULL;
2917
2918
    /* The list is temporarily made empty, so that mutations performed
2919
     * by comparison functions can't affect the slice of memory we're
2920
     * sorting (allowing mutations during sorting is a core-dump
2921
     * factory, since ob_item may change).
2922
     */
2923
805k
    saved_ob_size = Py_SIZE(self);
2924
805k
    saved_ob_item = self->ob_item;
2925
805k
    saved_allocated = self->allocated;
2926
805k
    Py_SET_SIZE(self, 0);
2927
805k
    FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item, NULL);
2928
805k
    self->allocated = -1; /* any operation will reset it to >= 0 */
2929
2930
805k
    if (keyfunc == NULL) {
2931
227k
        keys = NULL;
2932
227k
        lo.keys = saved_ob_item;
2933
227k
        lo.values = NULL;
2934
227k
    }
2935
578k
    else {
2936
578k
        if (saved_ob_size < MERGESTATE_TEMP_SIZE/2)
2937
            /* Leverage stack space we allocated but won't otherwise use */
2938
573k
            keys = &ms.temparray[saved_ob_size+1];
2939
4.26k
        else {
2940
4.26k
            keys = PyMem_Malloc(sizeof(PyObject *) * saved_ob_size);
2941
4.26k
            if (keys == NULL) {
2942
0
                PyErr_NoMemory();
2943
0
                goto keyfunc_fail;
2944
0
            }
2945
4.26k
        }
2946
2947
5.77M
        for (i = 0; i < saved_ob_size ; i++) {
2948
5.19M
            keys[i] = PyObject_CallOneArg(keyfunc, saved_ob_item[i]);
2949
5.19M
            if (keys[i] == NULL) {
2950
0
                for (i=i-1 ; i>=0 ; i--)
2951
0
                    Py_DECREF(keys[i]);
2952
0
                if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2)
2953
0
                    PyMem_Free(keys);
2954
0
                goto keyfunc_fail;
2955
0
            }
2956
5.19M
        }
2957
2958
578k
        lo.keys = keys;
2959
578k
        lo.values = saved_ob_item;
2960
578k
    }
2961
2962
2963
    /* The pre-sort check: here's where we decide which compare function to use.
2964
     * How much optimization is safe? We test for homogeneity with respect to
2965
     * several properties that are expensive to check at compare-time, and
2966
     * set ms appropriately. */
2967
805k
    if (saved_ob_size > 1) {
2968
        /* Assume the first element is representative of the whole list. */
2969
215k
        int keys_are_in_tuples = (Py_IS_TYPE(lo.keys[0], &PyTuple_Type) &&
2970
69
                                  Py_SIZE(lo.keys[0]) > 0);
2971
2972
215k
        PyTypeObject* key_type = (keys_are_in_tuples ?
2973
69
                                  Py_TYPE(PyTuple_GET_ITEM(lo.keys[0], 0)) :
2974
215k
                                  Py_TYPE(lo.keys[0]));
2975
2976
215k
        int keys_are_all_same_type = 1;
2977
215k
        int strings_are_latin = 1;
2978
215k
        int ints_are_bounded = 1;
2979
2980
        /* Prove that assumption by checking every key. */
2981
6.21M
        for (i=0; i < saved_ob_size; i++) {
2982
2983
5.99M
            if (keys_are_in_tuples &&
2984
615
                !(Py_IS_TYPE(lo.keys[i], &PyTuple_Type) && Py_SIZE(lo.keys[i]) != 0)) {
2985
0
                keys_are_in_tuples = 0;
2986
0
                keys_are_all_same_type = 0;
2987
0
                break;
2988
0
            }
2989
2990
            /* Note: for lists of tuples, key is the first element of the tuple
2991
             * lo.keys[i], not lo.keys[i] itself! We verify type-homogeneity
2992
             * for lists of tuples in the if-statement directly above. */
2993
5.99M
            PyObject *key = (keys_are_in_tuples ?
2994
615
                             PyTuple_GET_ITEM(lo.keys[i], 0) :
2995
5.99M
                             lo.keys[i]);
2996
2997
5.99M
            if (!Py_IS_TYPE(key, key_type)) {
2998
0
                keys_are_all_same_type = 0;
2999
                /* If keys are in tuple we must loop over the whole list to make
3000
                   sure all items are tuples */
3001
0
                if (!keys_are_in_tuples) {
3002
0
                    break;
3003
0
                }
3004
0
            }
3005
3006
5.99M
            if (keys_are_all_same_type) {
3007
5.99M
                if (key_type == &PyLong_Type &&
3008
4.69M
                    ints_are_bounded &&
3009
2.58M
                    !_PyLong_IsCompact((PyLongObject *)key)) {
3010
3011
7.03k
                    ints_are_bounded = 0;
3012
7.03k
                }
3013
5.99M
                else if (key_type == &PyUnicode_Type &&
3014
185k
                         strings_are_latin &&
3015
106k
                         PyUnicode_KIND(key) != PyUnicode_1BYTE_KIND) {
3016
3017
6.48k
                        strings_are_latin = 0;
3018
6.48k
                    }
3019
5.99M
                }
3020
5.99M
            }
3021
3022
        /* Choose the best compare, given what we now know about the keys. */
3023
215k
        if (keys_are_all_same_type) {
3024
3025
215k
            if (key_type == &PyUnicode_Type && strings_are_latin) {
3026
13.1k
                ms.key_compare = unsafe_latin_compare;
3027
13.1k
            }
3028
201k
            else if (key_type == &PyLong_Type && ints_are_bounded) {
3029
95.4k
                ms.key_compare = unsafe_long_compare;
3030
95.4k
            }
3031
106k
            else if (key_type == &PyFloat_Type) {
3032
0
                ms.key_compare = unsafe_float_compare;
3033
0
            }
3034
106k
            else if ((ms.key_richcompare = key_type->tp_richcompare) != NULL) {
3035
106k
                ms.key_compare = unsafe_object_compare;
3036
106k
            }
3037
0
            else {
3038
0
                ms.key_compare = safe_object_compare;
3039
0
            }
3040
215k
        }
3041
0
        else {
3042
0
            ms.key_compare = safe_object_compare;
3043
0
        }
3044
3045
215k
        if (keys_are_in_tuples) {
3046
            /* Make sure we're not dealing with tuples of tuples
3047
             * (remember: here, key_type refers to the list [key[0] for key in keys]) */
3048
69
            if (key_type == &PyTuple_Type) {
3049
0
                ms.tuple_elem_compare = safe_object_compare;
3050
0
            }
3051
69
            else {
3052
69
                ms.tuple_elem_compare = ms.key_compare;
3053
69
            }
3054
3055
69
            ms.key_compare = unsafe_tuple_compare;
3056
69
        }
3057
215k
    }
3058
    /* End of pre-sort check: ms is now set properly! */
3059
3060
805k
    merge_init(&ms, saved_ob_size, keys != NULL, &lo);
3061
3062
805k
    nremaining = saved_ob_size;
3063
805k
    if (nremaining < 2)
3064
590k
        goto succeed;
3065
3066
    /* Reverse sort stability achieved by initially reversing the list,
3067
    applying a stable forward sort, then reversing the final result. */
3068
215k
    if (reverse) {
3069
2
        if (keys != NULL)
3070
0
            reverse_slice(&keys[0], &keys[saved_ob_size]);
3071
2
        reverse_slice(&saved_ob_item[0], &saved_ob_item[saved_ob_size]);
3072
2
    }
3073
3074
    /* March over the array once, left to right, finding natural runs,
3075
     * and extending short natural runs to minrun elements.
3076
     */
3077
293k
    do {
3078
293k
        Py_ssize_t n;
3079
3080
        /* Identify next run. */
3081
293k
        n = count_run(&ms, &lo, nremaining);
3082
293k
        if (n < 0)
3083
0
            goto fail;
3084
        /* If short, extend to min(minrun, nremaining). */
3085
293k
        minrun = minrun_next(&ms);
3086
293k
        if (n < minrun) {
3087
132k
            const Py_ssize_t force = nremaining <= minrun ?
3088
80.4k
                              nremaining : minrun;
3089
132k
            if (binarysort(&ms, &lo, force, n) < 0)
3090
0
                goto fail;
3091
132k
            n = force;
3092
132k
        }
3093
        /* Maybe merge pending runs. */
3094
293k
        assert(ms.n == 0 || ms.pending[ms.n-1].base.keys +
3095
293k
                            ms.pending[ms.n-1].len == lo.keys);
3096
293k
        if (found_new_run(&ms, n) < 0)
3097
0
            goto fail;
3098
        /* Push new run on stack. */
3099
293k
        assert(ms.n < MAX_MERGE_PENDING);
3100
293k
        ms.pending[ms.n].base = lo;
3101
293k
        ms.pending[ms.n].len = n;
3102
293k
        ++ms.n;
3103
        /* Advance to find next run. */
3104
293k
        sortslice_advance(&lo, n);
3105
293k
        nremaining -= n;
3106
293k
    } while (nremaining);
3107
3108
215k
    if (merge_force_collapse(&ms) < 0)
3109
0
        goto fail;
3110
215k
    assert(ms.n == 1);
3111
215k
    assert(keys == NULL
3112
215k
           ? ms.pending[0].base.keys == saved_ob_item
3113
215k
           : ms.pending[0].base.keys == &keys[0]);
3114
215k
    assert(ms.pending[0].len == saved_ob_size);
3115
215k
    lo = ms.pending[0].base;
3116
3117
805k
succeed:
3118
805k
    result = Py_None;
3119
805k
fail:
3120
805k
    if (keys != NULL) {
3121
5.77M
        for (i = 0; i < saved_ob_size; i++)
3122
5.19M
            Py_DECREF(keys[i]);
3123
578k
        if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2)
3124
4.26k
            PyMem_Free(keys);
3125
578k
    }
3126
3127
805k
    if (self->allocated != -1 && result != NULL) {
3128
        /* The user mucked with the list during the sort,
3129
         * and we don't already have another error to report.
3130
         */
3131
0
        PyErr_SetString(PyExc_ValueError, "list modified during sort");
3132
0
        result = NULL;
3133
0
    }
3134
3135
805k
    if (reverse && saved_ob_size > 1)
3136
2
        reverse_slice(saved_ob_item, saved_ob_item + saved_ob_size);
3137
3138
805k
    merge_freemem(&ms);
3139
3140
805k
keyfunc_fail:
3141
805k
    final_ob_item = self->ob_item;
3142
805k
    i = Py_SIZE(self);
3143
805k
    Py_SET_SIZE(self, saved_ob_size);
3144
805k
    FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item, saved_ob_item);
3145
805k
    FT_ATOMIC_STORE_SSIZE_RELAXED(self->allocated, saved_allocated);
3146
805k
    if (final_ob_item != NULL) {
3147
        /* we cannot use list_clear() for this because it does not
3148
           guarantee that the list is really empty when it returns */
3149
0
        while (--i >= 0) {
3150
0
            Py_XDECREF(final_ob_item[i]);
3151
0
        }
3152
#ifdef Py_GIL_DISABLED
3153
        ensure_shared_on_resize(self);
3154
        bool use_qsbr = _PyObject_GC_IS_SHARED(self);
3155
#else
3156
0
        bool use_qsbr = false;
3157
0
#endif
3158
0
        free_list_items(final_ob_item, use_qsbr);
3159
0
    }
3160
805k
    return Py_XNewRef(result);
3161
805k
}
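When reverse=True, the comment above is the whole trick: reverse the keys (and items), run the stable forward sort, then reverse the result again, so equal elements keep their original relative order. Below is a minimal sketch of the in-place pointer reversal that reverse_slice performs, under a hypothetical name (swap_reverse is not part of this file):

/* Hypothetical illustration of the pointer-swapping reversal used before and
 * after the stable forward sort when reverse=True (assumes Python.h). */
static void
swap_reverse(PyObject **lo, PyObject **hi)
{
    --hi;                       /* hi points one past the last element */
    while (lo < hi) {
        PyObject *tmp = *lo;
        *lo = *hi;
        *hi = tmp;
        ++lo;
        --hi;
    }
}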
3162
#undef IFLT
3163
#undef ISLT
3164
3165
int
3166
PyList_Sort(PyObject *v)
3167
15.7k
{
3168
15.7k
    if (v == NULL || !PyList_Check(v)) {
3169
0
        PyErr_BadInternalCall();
3170
0
        return -1;
3171
0
    }
3172
15.7k
    Py_BEGIN_CRITICAL_SECTION(v);
3173
15.7k
    v = list_sort_impl((PyListObject *)v, NULL, 0);
3174
15.7k
    Py_END_CRITICAL_SECTION();
3175
15.7k
    if (v == NULL)
3176
0
        return -1;
3177
15.7k
    Py_DECREF(v);
3178
15.7k
    return 0;
3179
15.7k
}
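For reference, PyList_Sort() is the C-level spelling of lst.sort(): it sorts in place and returns 0 on success or -1 with an exception set. A hedged usage sketch, with a hypothetical helper name:

#include "Python.h"

/* Hypothetical helper: build [3, 1, 2], sort it in place with PyList_Sort(),
 * and return the list (now [1, 2, 3]) as a new reference, or NULL on error. */
static PyObject *
sorted_from_longs(void)
{
    PyObject *list = PyList_New(0);
    if (list == NULL) {
        return NULL;
    }
    const long values[] = {3, 1, 2};
    for (size_t i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
        PyObject *num = PyLong_FromLong(values[i]);
        if (num == NULL || PyList_Append(list, num) < 0) {
            Py_XDECREF(num);
            Py_DECREF(list);
            return NULL;
        }
        Py_DECREF(num);
    }
    if (PyList_Sort(list) < 0) {
        Py_DECREF(list);
        return NULL;
    }
    return list;
}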
3180
3181
/*[clinic input]
3182
@critical_section
3183
list.reverse
3184
3185
Reverse *IN PLACE*.
3186
[clinic start generated code]*/
3187
3188
static PyObject *
3189
list_reverse_impl(PyListObject *self)
3190
/*[clinic end generated code: output=482544fc451abea9 input=04ac8e0c6a66e4d9]*/
3191
0
{
3192
0
    if (Py_SIZE(self) > 1)
3193
0
        reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
3194
0
    Py_RETURN_NONE;
3195
0
}
3196
3197
int
3198
PyList_Reverse(PyObject *v)
3199
50
{
3200
50
    PyListObject *self = (PyListObject *)v;
3201
3202
50
    if (v == NULL || !PyList_Check(v)) {
3203
0
        PyErr_BadInternalCall();
3204
0
        return -1;
3205
0
    }
3206
50
    Py_BEGIN_CRITICAL_SECTION(self);
3207
50
    if (Py_SIZE(self) > 1) {
3208
50
        reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
3209
50
    }
3210
50
    Py_END_CRITICAL_SECTION();
3211
50
    return 0;
3212
50
}
3213
3214
PyObject *
3215
PyList_AsTuple(PyObject *v)
3216
286k
{
3217
286k
    if (v == NULL || !PyList_Check(v)) {
3218
0
        PyErr_BadInternalCall();
3219
0
        return NULL;
3220
0
    }
3221
286k
    PyObject *ret;
3222
286k
    PyListObject *self = (PyListObject *)v;
3223
286k
    Py_BEGIN_CRITICAL_SECTION(self);
3224
286k
    ret = _PyTuple_FromArray(self->ob_item, Py_SIZE(v));
3225
286k
    Py_END_CRITICAL_SECTION();
3226
286k
    return ret;
3227
286k
}
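PyList_AsTuple() takes a snapshot: the returned tuple holds new references to the items the list contained at call time, and later mutations of the list do not affect it. A minimal usage sketch (the helper name is hypothetical):

/* Hypothetical helper: freeze a list into a tuple, or return NULL on error. */
static PyObject *
freeze_list(PyObject *maybe_list)
{
    if (!PyList_Check(maybe_list)) {
        PyErr_SetString(PyExc_TypeError, "expected a list");
        return NULL;
    }
    return PyList_AsTuple(maybe_list);   /* new reference */
}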
3228
3229
PyObject *
3230
_PyList_AsTupleAndClear(PyListObject *self)
3231
24
{
3232
24
    assert(self != NULL);
3233
24
    PyObject *ret;
3234
24
    if (self->ob_item == NULL) {
3235
0
        return PyTuple_New(0);
3236
0
    }
3237
24
    Py_BEGIN_CRITICAL_SECTION(self);
3238
24
    PyObject **items = self->ob_item;
3239
24
    Py_ssize_t size = Py_SIZE(self);
3240
24
    self->ob_item = NULL;
3241
24
    Py_SET_SIZE(self, 0);
3242
24
    ret = _PyTuple_FromArraySteal(items, size);
3243
24
    free_list_items(items, false);
3244
24
    Py_END_CRITICAL_SECTION();
3245
24
    return ret;
3246
24
}
3247
3248
PyObject *
3249
_PyList_FromStackRefStealOnSuccess(const _PyStackRef *src, Py_ssize_t n)
3250
174M
{
3251
174M
    if (n == 0) {
3252
158M
        return PyList_New(0);
3253
158M
    }
3254
3255
15.9M
    PyListObject *list = (PyListObject *)PyList_New(n);
3256
15.9M
    if (list == NULL) {
3257
0
        return NULL;
3258
0
    }
3259
3260
15.9M
    PyObject **dst = list->ob_item;
3261
44.3M
    for (Py_ssize_t i = 0; i < n; i++) {
3262
28.3M
        dst[i] = PyStackRef_AsPyObjectSteal(src[i]);
3263
28.3M
    }
3264
3265
15.9M
    return (PyObject *)list;
3266
15.9M
}
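The loop above fills a preallocated list by stealing references from the stack. The same fill-by-stealing pattern exists in the public API, sketched below with a hypothetical helper: PyList_New(n) leaves n NULL slots and PyList_SET_ITEM() steals each reference.

/* Hypothetical sketch of the public-API steal-and-fill pattern. */
static PyObject *
list_from_longs(const long *src, Py_ssize_t n)
{
    PyObject *list = PyList_New(n);
    if (list == NULL) {
        return NULL;
    }
    for (Py_ssize_t i = 0; i < n; i++) {
        PyObject *v = PyLong_FromLong(src[i]);
        if (v == NULL) {
            Py_DECREF(list);   /* unfilled slots are NULL; dealloc handles that */
            return NULL;
        }
        PyList_SET_ITEM(list, i, v);   /* steals the reference to v */
    }
    return list;
}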
3267
3268
/*[clinic input]
3269
list.index
3270
3271
    value: object
3272
    start: slice_index(accept={int}) = 0
3273
    stop: slice_index(accept={int}, c_default="PY_SSIZE_T_MAX") = sys.maxsize
3274
    /
3275
3276
Return first index of value.
3277
3278
Raises ValueError if the value is not present.
3279
[clinic start generated code]*/
3280
3281
static PyObject *
3282
list_index_impl(PyListObject *self, PyObject *value, Py_ssize_t start,
3283
                Py_ssize_t stop)
3284
/*[clinic end generated code: output=ec51b88787e4e481 input=40ec5826303a0eb1]*/
3285
0
{
3286
0
    if (start < 0) {
3287
0
        start += Py_SIZE(self);
3288
0
        if (start < 0)
3289
0
            start = 0;
3290
0
    }
3291
0
    if (stop < 0) {
3292
0
        stop += Py_SIZE(self);
3293
0
        if (stop < 0)
3294
0
            stop = 0;
3295
0
    }
3296
0
    for (Py_ssize_t i = start; i < stop; i++) {
3297
0
        PyObject *obj = list_get_item_ref(self, i);
3298
0
        if (obj == NULL) {
3299
            // out-of-bounds
3300
0
            break;
3301
0
        }
3302
0
        int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
3303
0
        Py_DECREF(obj);
3304
0
        if (cmp > 0)
3305
0
            return PyLong_FromSsize_t(i);
3306
0
        else if (cmp < 0)
3307
0
            return NULL;
3308
0
    }
3309
0
    PyErr_SetString(PyExc_ValueError, "list.index(x): x not in list");
3310
0
    return NULL;
3311
0
}
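Note the pattern in the loop above: list_get_item_ref() hands back a strong reference that is held across PyObject_RichCompareBool(), because an arbitrary __eq__ can mutate or shrink the list while the comparison runs. A hedged public-API sketch of the same search for GIL-held code (find_first is a hypothetical name):

/* Hypothetical sketch: hold a strong reference across the comparison and
 * re-read the size on every iteration, since __eq__ may mutate the list. */
static Py_ssize_t
find_first(PyObject *list, PyObject *value)
{
    for (Py_ssize_t i = 0; i < PyList_Size(list); i++) {
        PyObject *item = PyList_GetItem(list, i);   /* borrowed */
        if (item == NULL) {
            return -1;
        }
        Py_INCREF(item);
        int cmp = PyObject_RichCompareBool(item, value, Py_EQ);
        Py_DECREF(item);
        if (cmp < 0) {
            return -1;            /* comparison raised */
        }
        if (cmp > 0) {
            return i;             /* found */
        }
    }
    if (!PyErr_Occurred()) {
        PyErr_SetString(PyExc_ValueError, "value not in list");
    }
    return -1;
}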
3312
3313
/*[clinic input]
3314
list.count
3315
3316
     value: object
3317
     /
3318
3319
Return number of occurrences of value.
3320
[clinic start generated code]*/
3321
3322
static PyObject *
3323
list_count_impl(PyListObject *self, PyObject *value)
3324
/*[clinic end generated code: output=eff66f14aef2df86 input=3bdc3a5e6f749565]*/
3325
0
{
3326
0
    Py_ssize_t count = 0;
3327
0
    for (Py_ssize_t i = 0; ; i++) {
3328
0
        PyObject *obj = list_get_item_ref(self, i);
3329
0
        if (obj == NULL) {
3330
            // out-of-bounds
3331
0
            break;
3332
0
        }
3333
0
        if (obj == value) {
3334
0
           count++;
3335
0
           Py_DECREF(obj);
3336
0
           continue;
3337
0
        }
3338
0
        int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
3339
0
        Py_DECREF(obj);
3340
0
        if (cmp > 0)
3341
0
            count++;
3342
0
        else if (cmp < 0)
3343
0
            return NULL;
3344
0
    }
3345
0
    return PyLong_FromSsize_t(count);
3346
0
}
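The obj == value fast path above means identity is checked before __eq__ is ever consulted, which is why, for example, a NaN object counts as present in a list containing that same NaN. Calling the method from C can go through the generic call machinery (the helper below is hypothetical):

/* Hypothetical helper: lst.count(value) via PyObject_CallMethod. */
static Py_ssize_t
count_in_list(PyObject *lst, PyObject *value)
{
    PyObject *n = PyObject_CallMethod(lst, "count", "O", value);
    if (n == NULL) {
        return -1;
    }
    Py_ssize_t res = PyLong_AsSsize_t(n);
    Py_DECREF(n);
    return res;
}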
3347
3348
/*[clinic input]
3349
@critical_section
3350
list.remove
3351
3352
     value: object
3353
     /
3354
3355
Remove first occurrence of value.
3356
3357
Raises ValueError if the value is not present.
3358
[clinic start generated code]*/
3359
3360
static PyObject *
3361
list_remove_impl(PyListObject *self, PyObject *value)
3362
/*[clinic end generated code: output=b9b76a6633b18778 input=26c813dbb95aa93b]*/
3363
3.26k
{
3364
3.26k
    Py_ssize_t i;
3365
3366
3.27k
    for (i = 0; i < Py_SIZE(self); i++) {
3367
3.27k
        PyObject *obj = self->ob_item[i];
3368
3.27k
        Py_INCREF(obj);
3369
3.27k
        int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
3370
3.27k
        Py_DECREF(obj);
3371
3.27k
        if (cmp > 0) {
3372
3.26k
            if (list_ass_slice_lock_held(self, i, i+1, NULL) == 0)
3373
3.26k
                Py_RETURN_NONE;
3374
0
            return NULL;
3375
3.26k
        }
3376
10
        else if (cmp < 0)
3377
0
            return NULL;
3378
3.27k
    }
3379
2
    PyErr_SetString(PyExc_ValueError, "list.remove(x): x not in list");
3380
2
    return NULL;
3381
3.26k
}
3382
3383
static int
3384
list_traverse(PyObject *self, visitproc visit, void *arg)
3385
59.3M
{
3386
59.3M
    PyListObject *o = (PyListObject *)self;
3387
59.3M
    Py_ssize_t i;
3388
3389
760M
    for (i = Py_SIZE(o); --i >= 0; )
3390
701M
        Py_VISIT(o->ob_item[i]);
3391
59.3M
    return 0;
3392
59.3M
}
3393
3394
static PyObject *
3395
list_richcompare_impl(PyObject *v, PyObject *w, int op)
3396
4.59k
{
3397
4.59k
    PyListObject *vl, *wl;
3398
4.59k
    Py_ssize_t i;
3399
3400
4.59k
    if (!PyList_Check(v) || !PyList_Check(w))
3401
787
        Py_RETURN_NOTIMPLEMENTED;
3402
3403
3.80k
    vl = (PyListObject *)v;
3404
3.80k
    wl = (PyListObject *)w;
3405
3406
3.80k
    if (Py_SIZE(vl) != Py_SIZE(wl) && (op == Py_EQ || op == Py_NE)) {
3407
        /* Shortcut: if the lengths differ, the lists differ */
3408
476
        if (op == Py_EQ)
3409
476
            Py_RETURN_FALSE;
3410
0
        else
3411
0
            Py_RETURN_TRUE;
3412
476
    }
3413
3414
    /* Search for the first index where items are different */
3415
3.53k
    for (i = 0; i < Py_SIZE(vl) && i < Py_SIZE(wl); i++) {
3416
412
        PyObject *vitem = vl->ob_item[i];
3417
412
        PyObject *witem = wl->ob_item[i];
3418
412
        if (vitem == witem) {
3419
158
            continue;
3420
158
        }
3421
3422
254
        Py_INCREF(vitem);
3423
254
        Py_INCREF(witem);
3424
254
        int k = PyObject_RichCompareBool(vitem, witem, Py_EQ);
3425
254
        Py_DECREF(vitem);
3426
254
        Py_DECREF(witem);
3427
254
        if (k < 0)
3428
0
            return NULL;
3429
254
        if (!k)
3430
206
            break;
3431
254
    }
3432
3433
3.32k
    if (i >= Py_SIZE(vl) || i >= Py_SIZE(wl)) {
3434
        /* No more items to compare -- compare sizes */
3435
3.12k
        Py_RETURN_RICHCOMPARE(Py_SIZE(vl), Py_SIZE(wl), op);
3436
3.12k
    }
3437
3438
    /* We have an item that differs -- shortcuts for EQ/NE */
3439
206
    if (op == Py_EQ) {
3440
194
        Py_RETURN_FALSE;
3441
194
    }
3442
12
    if (op == Py_NE) {
3443
12
        Py_RETURN_TRUE;
3444
12
    }
3445
3446
    /* Compare the final item again using the proper operator */
3447
0
    PyObject *vitem = vl->ob_item[i];
3448
0
    PyObject *witem = wl->ob_item[i];
3449
0
    Py_INCREF(vitem);
3450
0
    Py_INCREF(witem);
3451
0
    PyObject *result = PyObject_RichCompare(vitem, witem, op);
3452
0
    Py_DECREF(vitem);
3453
0
    Py_DECREF(witem);
3454
0
    return result;
3455
12
}
3456
3457
static PyObject *
3458
list_richcompare(PyObject *v, PyObject *w, int op)
3459
4.59k
{
3460
4.59k
    PyObject *ret;
3461
4.59k
    Py_BEGIN_CRITICAL_SECTION2(v, w);
3462
4.59k
    ret = list_richcompare_impl(v, w, op);
3463
4.59k
    Py_END_CRITICAL_SECTION2();
3464
4.59k
    return ret;
3465
4.59k
}
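The comparison above is lexicographic: find the first index where items differ (identity first, then __eq__), answer EQ/NE immediately from that, and otherwise compare the differing pair with the requested operator; if one list is a prefix of the other, the sizes decide. So [1, 2] < [1, 2, 0] and [1, 3] > [1, 2, 99]. A minimal sketch with a hypothetical helper name:

/* Hypothetical helper: "a < b" for two lists; returns 1, 0, or -1 on error.
 * PyObject_RichCompareBool dispatches to list_richcompare above. */
static int
list_less_than(PyObject *a, PyObject *b)
{
    return PyObject_RichCompareBool(a, b, Py_LT);
}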
3466
3467
/*[clinic input]
3468
list.__init__
3469
3470
    iterable: object(c_default="NULL") = ()
3471
    /
3472
3473
Built-in mutable sequence.
3474
3475
If no argument is given, the constructor creates a new empty list.
3476
The argument must be an iterable if specified.
3477
[clinic start generated code]*/
3478
3479
static int
3480
list___init___impl(PyListObject *self, PyObject *iterable)
3481
/*[clinic end generated code: output=0f3c21379d01de48 input=b3f3fe7206af8f6b]*/
3482
22.3M
{
3483
    /* Verify list invariants established by PyType_GenericAlloc() */
3484
22.3M
    assert(0 <= Py_SIZE(self));
3485
22.3M
    assert(Py_SIZE(self) <= self->allocated || self->allocated == -1);
3486
22.3M
    assert(self->ob_item != NULL ||
3487
22.3M
           self->allocated == 0 || self->allocated == -1);
3488
3489
    /* Empty previous contents */
3490
22.3M
    if (self->ob_item != NULL) {
3491
0
        Py_BEGIN_CRITICAL_SECTION(self);
3492
0
        list_clear(self);
3493
0
        Py_END_CRITICAL_SECTION();
3494
0
    }
3495
22.3M
    if (iterable != NULL) {
3496
10.3M
        if (_list_extend(self, iterable) < 0) {
3497
0
            return -1;
3498
0
        }
3499
10.3M
    }
3500
22.3M
    return 0;
3501
22.3M
}
3502
3503
static PyObject *
3504
list_vectorcall(PyObject *type, PyObject * const*args,
3505
                size_t nargsf, PyObject *kwnames)
3506
10.3M
{
3507
10.3M
    if (!_PyArg_NoKwnames("list", kwnames)) {
3508
0
        return NULL;
3509
0
    }
3510
10.3M
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
3511
10.3M
    if (!_PyArg_CheckPositional("list", nargs, 0, 1)) {
3512
0
        return NULL;
3513
0
    }
3514
3515
10.3M
    PyObject *list = PyType_GenericAlloc(_PyType_CAST(type), 0);
3516
10.3M
    if (list == NULL) {
3517
0
        return NULL;
3518
0
    }
3519
10.3M
    if (nargs) {
3520
10.3M
        if (list___init___impl((PyListObject *)list, args[0])) {
3521
0
            Py_DECREF(list);
3522
0
            return NULL;
3523
0
        }
3524
10.3M
    }
3525
10.3M
    return list;
3526
10.3M
}
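Calling the type object from C, e.g. PyObject_CallOneArg((PyObject *)&PyList_Type, iterable), goes through list_vectorcall above; the usual shortcut for list(iterable) in C code is PySequence_List(). A short sketch (hypothetical helper name):

/* Hypothetical helper: materialize any iterable as a new list. */
static PyObject *
to_list(PyObject *iterable)
{
    return PySequence_List(iterable);   /* equivalent to list(iterable) */
}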
3527
3528
3529
/*[clinic input]
3530
list.__sizeof__
3531
3532
Return the size of the list in memory, in bytes.
3533
[clinic start generated code]*/
3534
3535
static PyObject *
3536
list___sizeof___impl(PyListObject *self)
3537
/*[clinic end generated code: output=3417541f95f9a53e input=b8030a5d5ce8a187]*/
3538
0
{
3539
0
    size_t res = _PyObject_SIZE(Py_TYPE(self));
3540
0
    Py_ssize_t allocated = FT_ATOMIC_LOAD_SSIZE_RELAXED(self->allocated);
3541
0
    res += (size_t)allocated * sizeof(void*);
3542
0
    return PyLong_FromSize_t(res);
3543
0
}
3544
3545
static PyObject *list_iter(PyObject *seq);
3546
static PyObject *list_subscript(PyObject*, PyObject*);
3547
3548
static PyMethodDef list_methods[] = {
3549
    {"__getitem__", list_subscript, METH_O|METH_COEXIST,
3550
     PyDoc_STR("__getitem__($self, index, /)\n--\n\nReturn self[index].")},
3551
    LIST___REVERSED___METHODDEF
3552
    LIST___SIZEOF___METHODDEF
3553
    PY_LIST_CLEAR_METHODDEF
3554
    LIST_COPY_METHODDEF
3555
    LIST_APPEND_METHODDEF
3556
    LIST_INSERT_METHODDEF
3557
    LIST_EXTEND_METHODDEF
3558
    LIST_POP_METHODDEF
3559
    LIST_REMOVE_METHODDEF
3560
    LIST_INDEX_METHODDEF
3561
    LIST_COUNT_METHODDEF
3562
    LIST_REVERSE_METHODDEF
3563
    LIST_SORT_METHODDEF
3564
    {"__class_getitem__", Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")},
3565
    {NULL,              NULL}           /* sentinel */
3566
};
3567
3568
static PySequenceMethods list_as_sequence = {
3569
    list_length,                                /* sq_length */
3570
    list_concat,                                /* sq_concat */
3571
    list_repeat,                                /* sq_repeat */
3572
    list_item,                                  /* sq_item */
3573
    0,                                          /* sq_slice */
3574
    list_ass_item,                              /* sq_ass_item */
3575
    0,                                          /* sq_ass_slice */
3576
    list_contains,                              /* sq_contains */
3577
    list_inplace_concat,                        /* sq_inplace_concat */
3578
    list_inplace_repeat,                        /* sq_inplace_repeat */
3579
};
3580
3581
static inline PyObject *
3582
list_slice_step_lock_held(PyListObject *a, Py_ssize_t start, Py_ssize_t step, Py_ssize_t len)
3583
48
{
3584
48
    PyListObject *np = (PyListObject *)list_new_prealloc(len);
3585
48
    if (np == NULL) {
3586
0
        return NULL;
3587
0
    }
3588
48
    size_t cur;
3589
48
    Py_ssize_t i;
3590
48
    PyObject **src = a->ob_item;
3591
48
    PyObject **dest = np->ob_item;
3592
480
    for (cur = start, i = 0; i < len;
3593
432
            cur += (size_t)step, i++) {
3594
432
        PyObject *v = src[cur];
3595
432
        dest[i] = Py_NewRef(v);
3596
432
    }
3597
48
    Py_SET_SIZE(np, len);
3598
48
    return (PyObject *)np;
3599
48
}
3600
3601
static PyObject *
3602
list_slice_wrap(PyListObject *aa, Py_ssize_t start, Py_ssize_t stop, Py_ssize_t step)
3603
2.54M
{
3604
2.54M
    PyObject *res = NULL;
3605
2.54M
    Py_BEGIN_CRITICAL_SECTION(aa);
3606
2.54M
    Py_ssize_t len = PySlice_AdjustIndices(Py_SIZE(aa), &start, &stop, step);
3607
2.54M
    if (len <= 0) {
3608
387k
        res = PyList_New(0);
3609
387k
    }
3610
2.15M
    else if (step == 1) {
3611
2.15M
        res = list_slice_lock_held(aa, start, stop);
3612
2.15M
    }
3613
48
    else {
3614
48
        res = list_slice_step_lock_held(aa, start, step, len);
3615
48
    }
3616
2.54M
    Py_END_CRITICAL_SECTION();
3617
2.54M
    return res;
3618
2.54M
}
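The wrapper above first clamps the raw slice bounds with PySlice_AdjustIndices(), which also returns how many elements the slice selects, and only then picks between the contiguous (step == 1) and strided copies. The same two-step dance with the public slice API, as a hypothetical helper:

/* Hypothetical sketch: adjusted bounds and element count for a slice object
 * applied to a sequence of length seq_len. */
static Py_ssize_t
slice_extent(PyObject *slice, Py_ssize_t seq_len,
             Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step)
{
    if (PySlice_Unpack(slice, start, stop, step) < 0) {
        return -1;
    }
    return PySlice_AdjustIndices(seq_len, start, stop, *step);
}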
3619
3620
static inline PyObject*
3621
list_slice_subscript(PyObject* self, PyObject* item)
3622
2.54M
{
3623
2.54M
    assert(PyList_Check(self));
3624
2.54M
    assert(PySlice_Check(item));
3625
2.54M
    Py_ssize_t start, stop, step;
3626
2.54M
    if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
3627
0
        return NULL;
3628
0
    }
3629
2.54M
    return list_slice_wrap((PyListObject *)self, start, stop, step);
3630
2.54M
}
3631
3632
PyObject *
3633
_PyList_SliceSubscript(PyObject* _self, PyObject* item)
3634
2.39M
{
3635
2.39M
    return list_slice_subscript(_self, item);
3636
2.39M
}
3637
3638
static PyObject *
3639
list_subscript(PyObject* _self, PyObject* item)
3640
16.3M
{
3641
16.3M
    PyListObject* self = (PyListObject*)_self;
3642
16.3M
    if (_PyIndex_Check(item)) {
3643
16.1M
        Py_ssize_t i;
3644
16.1M
        i = PyNumber_AsSsize_t(item, PyExc_IndexError);
3645
16.1M
        if (i == -1 && PyErr_Occurred())
3646
0
            return NULL;
3647
16.1M
        if (i < 0)
3648
11.9M
            i += PyList_GET_SIZE(self);
3649
16.1M
        return list_item((PyObject *)self, i);
3650
16.1M
    }
3651
147k
    else if (PySlice_Check(item)) {
3652
147k
        return list_slice_subscript(_self, item);
3653
147k
    }
3654
0
    else {
3655
0
        PyErr_Format(PyExc_TypeError,
3656
0
                     "list indices must be integers or slices, not %.200s",
3657
0
                     Py_TYPE(item)->tp_name);
3658
0
        return NULL;
3659
0
    }
3660
16.3M
}
3661
3662
static Py_ssize_t
3663
adjust_slice_indexes(PyListObject *lst,
3664
                     Py_ssize_t *start, Py_ssize_t *stop,
3665
                     Py_ssize_t step)
3666
233k
{
3667
233k
    Py_ssize_t slicelength = PySlice_AdjustIndices(Py_SIZE(lst), start, stop,
3668
233k
                                                   step);
3669
3670
    /* Make sure s[5:2] = [..] inserts at the right place:
3671
        before 5, not before 2. */
3672
233k
    if ((step < 0 && *start < *stop) ||
3673
233k
        (step > 0 && *start > *stop))
3674
0
        *stop = *start;
3675
3676
233k
    return slicelength;
3677
233k
}
3678
3679
static int
3680
list_ass_subscript_lock_held(PyObject *_self, PyObject *item, PyObject *value)
3681
236k
{
3682
236k
    _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(_self);
3683
3684
236k
    PyListObject *self = (PyListObject *)_self;
3685
236k
    if (_PyIndex_Check(item)) {
3686
3.12k
        Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
3687
3.12k
        if (i == -1 && PyErr_Occurred())
3688
0
            return -1;
3689
3.12k
        if (i < 0)
3690
2.98k
            i += PyList_GET_SIZE(self);
3691
3.12k
        return list_ass_item_lock_held(self, i, value);
3692
3.12k
    }
3693
233k
    else if (PySlice_Check(item)) {
3694
233k
        Py_ssize_t start, stop, step;
3695
3696
233k
        if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
3697
0
            return -1;
3698
0
        }
3699
3700
233k
        if (value == NULL) {
3701
            /* delete slice */
3702
12
            PyObject **garbage;
3703
12
            size_t cur;
3704
12
            Py_ssize_t i;
3705
12
            int res;
3706
3707
12
            Py_ssize_t slicelength = adjust_slice_indexes(self, &start, &stop,
3708
12
                                                          step);
3709
3710
12
            if (step == 1)
3711
12
                return list_ass_slice_lock_held(self, start, stop, value);
3712
3713
0
            if (slicelength <= 0)
3714
0
                return 0;
3715
3716
0
            if (step < 0) {
3717
0
                stop = start + 1;
3718
0
                start = stop + step*(slicelength - 1) - 1;
3719
0
                step = -step;
3720
0
            }
3721
3722
0
            garbage = (PyObject**)
3723
0
                PyMem_Malloc(slicelength*sizeof(PyObject*));
3724
0
            if (!garbage) {
3725
0
                PyErr_NoMemory();
3726
0
                return -1;
3727
0
            }
3728
3729
            /* drawing pictures might help understand these for
3730
               loops. Basically, we memmove the parts of the
3731
               list that are *not* part of the slice: step-1
3732
               items for each item that is part of the slice,
3733
               and then the tail end of the list that was not
3734
               covered by the slice */
3735
0
            for (cur = start, i = 0;
3736
0
                 cur < (size_t)stop;
3737
0
                 cur += step, i++) {
3738
0
                Py_ssize_t lim = step - 1;
3739
3740
0
                garbage[i] = PyList_GET_ITEM(self, cur);
3741
3742
0
                if (cur + step >= (size_t)Py_SIZE(self)) {
3743
0
                    lim = Py_SIZE(self) - cur - 1;
3744
0
                }
3745
3746
0
                memmove(self->ob_item + cur - i,
3747
0
                    self->ob_item + cur + 1,
3748
0
                    lim * sizeof(PyObject *));
3749
0
            }
3750
0
            cur = start + (size_t)slicelength * step;
3751
0
            if (cur < (size_t)Py_SIZE(self)) {
3752
0
                memmove(self->ob_item + cur - slicelength,
3753
0
                    self->ob_item + cur,
3754
0
                    (Py_SIZE(self) - cur) *
3755
0
                     sizeof(PyObject *));
3756
0
            }
3757
3758
0
            Py_SET_SIZE(self, Py_SIZE(self) - slicelength);
3759
0
            res = list_resize(self, Py_SIZE(self));
3760
3761
0
            for (i = 0; i < slicelength; i++) {
3762
0
                Py_DECREF(garbage[i]);
3763
0
            }
3764
0
            PyMem_Free(garbage);
3765
3766
0
            return res;
3767
0
        }
3768
233k
        else {
3769
            /* assign slice */
3770
233k
            PyObject *ins, *seq;
3771
233k
            PyObject **garbage, **seqitems, **selfitems;
3772
233k
            Py_ssize_t i;
3773
233k
            size_t cur;
3774
3775
            /* protect against a[::-1] = a */
3776
233k
            if (self == (PyListObject*)value) {
3777
0
                seq = list_slice_lock_held((PyListObject *)value, 0,
3778
0
                                            Py_SIZE(value));
3779
0
            }
3780
233k
            else {
3781
233k
                seq = PySequence_Fast(value,
3782
233k
                                      "must assign iterable "
3783
233k
                                      "to extended slice");
3784
233k
            }
3785
233k
            if (!seq)
3786
0
                return -1;
3787
3788
233k
            Py_ssize_t slicelength = adjust_slice_indexes(self, &start, &stop,
3789
233k
                                                          step);
3790
3791
233k
            if (step == 1) {
3792
233k
                int res = list_ass_slice_lock_held(self, start, stop, seq);
3793
233k
                Py_DECREF(seq);
3794
233k
                return res;
3795
233k
            }
3796
3797
0
            if (PySequence_Fast_GET_SIZE(seq) != slicelength) {
3798
0
                PyErr_Format(PyExc_ValueError,
3799
0
                    "attempt to assign sequence of "
3800
0
                    "size %zd to extended slice of "
3801
0
                    "size %zd",
3802
0
                         PySequence_Fast_GET_SIZE(seq),
3803
0
                         slicelength);
3804
0
                Py_DECREF(seq);
3805
0
                return -1;
3806
0
            }
3807
3808
0
            if (!slicelength) {
3809
0
                Py_DECREF(seq);
3810
0
                return 0;
3811
0
            }
3812
3813
0
            garbage = (PyObject**)
3814
0
                PyMem_Malloc(slicelength*sizeof(PyObject*));
3815
0
            if (!garbage) {
3816
0
                Py_DECREF(seq);
3817
0
                PyErr_NoMemory();
3818
0
                return -1;
3819
0
            }
3820
3821
0
            selfitems = self->ob_item;
3822
0
            seqitems = PySequence_Fast_ITEMS(seq);
3823
0
            for (cur = start, i = 0; i < slicelength;
3824
0
                 cur += (size_t)step, i++) {
3825
0
                garbage[i] = selfitems[cur];
3826
0
                ins = Py_NewRef(seqitems[i]);
3827
0
                selfitems[cur] = ins;
3828
0
            }
3829
3830
0
            for (i = 0; i < slicelength; i++) {
3831
0
                Py_DECREF(garbage[i]);
3832
0
            }
3833
3834
0
            PyMem_Free(garbage);
3835
0
            Py_DECREF(seq);
3836
3837
0
            return 0;
3838
0
        }
3839
233k
    }
3840
0
    else {
3841
0
        PyErr_Format(PyExc_TypeError,
3842
0
                     "list indices must be integers or slices, not %.200s",
3843
0
                     Py_TYPE(item)->tp_name);
3844
0
        return -1;
3845
0
    }
3846
236k
}
3847
3848
static int
3849
list_ass_subscript(PyObject *self, PyObject *item, PyObject *value)
3850
236k
{
3851
236k
    int res;
3852
#ifdef Py_GIL_DISABLED
3853
    if (PySlice_Check(item) && value != NULL && PyList_CheckExact(value)) {
3854
        Py_BEGIN_CRITICAL_SECTION2(self, value);
3855
        res = list_ass_subscript_lock_held(self, item, value);
3856
        Py_END_CRITICAL_SECTION2();
3857
        return res;
3858
    }
3859
#endif
3860
236k
    Py_BEGIN_CRITICAL_SECTION(self);
3861
236k
    res = list_ass_subscript_lock_held(self, item, value);
3862
236k
    Py_END_CRITICAL_SECTION();
3863
236k
    return res;
3864
236k
}
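Deleting or assigning an extended slice from C reaches the same path: build a slice object and call PyObject_DelItem() or PyObject_SetItem(). For example, del lst[::2] exercises the memmove-based deletion branch in list_ass_subscript_lock_held above (the helper below is hypothetical):

/* Hypothetical helper: del lst[::2] -- delete every other element. */
static int
del_every_other(PyObject *lst)
{
    PyObject *two = PyLong_FromLong(2);
    if (two == NULL) {
        return -1;
    }
    PyObject *slice = PySlice_New(NULL, NULL, two);   /* [::2] */
    Py_DECREF(two);
    if (slice == NULL) {
        return -1;
    }
    int res = PyObject_DelItem(lst, slice);
    Py_DECREF(slice);
    return res;
}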
3865
3866
static PyMappingMethods list_as_mapping = {
3867
    list_length,
3868
    list_subscript,
3869
    list_ass_subscript
3870
};
3871
3872
PyTypeObject PyList_Type = {
3873
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
3874
    "list",
3875
    sizeof(PyListObject),
3876
    0,
3877
    list_dealloc,                               /* tp_dealloc */
3878
    0,                                          /* tp_vectorcall_offset */
3879
    0,                                          /* tp_getattr */
3880
    0,                                          /* tp_setattr */
3881
    0,                                          /* tp_as_async */
3882
    list_repr,                                  /* tp_repr */
3883
    0,                                          /* tp_as_number */
3884
    &list_as_sequence,                          /* tp_as_sequence */
3885
    &list_as_mapping,                           /* tp_as_mapping */
3886
    PyObject_HashNotImplemented,                /* tp_hash */
3887
    0,                                          /* tp_call */
3888
    0,                                          /* tp_str */
3889
    PyObject_GenericGetAttr,                    /* tp_getattro */
3890
    0,                                          /* tp_setattro */
3891
    0,                                          /* tp_as_buffer */
3892
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
3893
        Py_TPFLAGS_BASETYPE | Py_TPFLAGS_LIST_SUBCLASS |
3894
        _Py_TPFLAGS_MATCH_SELF | Py_TPFLAGS_SEQUENCE,  /* tp_flags */
3895
    list___init____doc__,                       /* tp_doc */
3896
    list_traverse,                              /* tp_traverse */
3897
    list_clear_slot,                            /* tp_clear */
3898
    list_richcompare,                           /* tp_richcompare */
3899
    0,                                          /* tp_weaklistoffset */
3900
    list_iter,                                  /* tp_iter */
3901
    0,                                          /* tp_iternext */
3902
    list_methods,                               /* tp_methods */
3903
    0,                                          /* tp_members */
3904
    0,                                          /* tp_getset */
3905
    0,                                          /* tp_base */
3906
    0,                                          /* tp_dict */
3907
    0,                                          /* tp_descr_get */
3908
    0,                                          /* tp_descr_set */
3909
    0,                                          /* tp_dictoffset */
3910
    list___init__,                              /* tp_init */
3911
    PyType_GenericAlloc,                        /* tp_alloc */
3912
    PyType_GenericNew,                          /* tp_new */
3913
    PyObject_GC_Del,                            /* tp_free */
3914
    .tp_vectorcall = list_vectorcall,
3915
    .tp_version_tag = _Py_TYPE_VERSION_LIST,
3916
};
3917
3918
/*********************** List Iterator **************************/
3919
3920
static void listiter_dealloc(PyObject *);
3921
static int listiter_traverse(PyObject *, visitproc, void *);
3922
static PyObject *listiter_next(PyObject *);
3923
static PyObject *listiter_len(PyObject *, PyObject *);
3924
static PyObject *listiter_reduce_general(void *_it, int forward);
3925
static PyObject *listiter_reduce(PyObject *, PyObject *);
3926
static PyObject *listiter_setstate(PyObject *, PyObject *state);
3927
3928
PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it)).");
3929
PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");
3930
PyDoc_STRVAR(setstate_doc, "Set state information for unpickling.");
3931
3932
static PyMethodDef listiter_methods[] = {
3933
    {"__length_hint__", listiter_len, METH_NOARGS, length_hint_doc},
3934
    {"__reduce__", listiter_reduce, METH_NOARGS, reduce_doc},
3935
    {"__setstate__", listiter_setstate, METH_O, setstate_doc},
3936
    {NULL,              NULL}           /* sentinel */
3937
};
3938
3939
PyTypeObject PyListIter_Type = {
3940
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
3941
    "list_iterator",                            /* tp_name */
3942
    sizeof(_PyListIterObject),                  /* tp_basicsize */
3943
    0,                                          /* tp_itemsize */
3944
    /* methods */
3945
    listiter_dealloc,               /* tp_dealloc */
3946
    0,                                          /* tp_vectorcall_offset */
3947
    0,                                          /* tp_getattr */
3948
    0,                                          /* tp_setattr */
3949
    0,                                          /* tp_as_async */
3950
    0,                                          /* tp_repr */
3951
    0,                                          /* tp_as_number */
3952
    0,                                          /* tp_as_sequence */
3953
    0,                                          /* tp_as_mapping */
3954
    0,                                          /* tp_hash */
3955
    0,                                          /* tp_call */
3956
    0,                                          /* tp_str */
3957
    PyObject_GenericGetAttr,                    /* tp_getattro */
3958
    0,                                          /* tp_setattro */
3959
    0,                                          /* tp_as_buffer */
3960
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
3961
    0,                                          /* tp_doc */
3962
    listiter_traverse,                          /* tp_traverse */
3963
    0,                                          /* tp_clear */
3964
    0,                                          /* tp_richcompare */
3965
    0,                                          /* tp_weaklistoffset */
3966
    PyObject_SelfIter,                          /* tp_iter */
3967
    listiter_next,                              /* tp_iternext */
3968
    listiter_methods,                           /* tp_methods */
3969
    0,                                          /* tp_members */
3970
};
3971
3972
3973
static PyObject *
3974
list_iter(PyObject *seq)
3975
33.3M
{
3976
33.3M
    if (!PyList_Check(seq)) {
3977
0
        PyErr_BadInternalCall();
3978
0
        return NULL;
3979
0
    }
3980
33.3M
    _PyListIterObject *it = _Py_FREELIST_POP(_PyListIterObject, list_iters);
3981
33.3M
    if (it == NULL) {
3982
1.96M
        it = PyObject_GC_New(_PyListIterObject, &PyListIter_Type);
3983
1.96M
        if (it == NULL) {
3984
0
            return NULL;
3985
0
        }
3986
1.96M
    }
3987
33.3M
    it->it_index = 0;
3988
33.3M
    it->it_seq = (PyListObject *)Py_NewRef(seq);
3989
33.3M
    _PyObject_GC_TRACK(it);
3990
33.3M
    return (PyObject *)it;
3991
33.3M
}
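From C, iteration follows the usual protocol: PyObject_GetIter() ends up in list_iter above (handing out a freelisted _PyListIterObject), and each PyIter_Next() call lands in listiter_next below, returning a strong reference or NULL at exhaustion. A minimal consumer sketch (hypothetical helper name):

/* Hypothetical helper: sum a list of ints through the iterator protocol. */
static int
sum_list_longs(PyObject *list, long *total)
{
    PyObject *it = PyObject_GetIter(list);
    if (it == NULL) {
        return -1;
    }
    *total = 0;
    PyObject *item;
    while ((item = PyIter_Next(it)) != NULL) {
        long v = PyLong_AsLong(item);
        Py_DECREF(item);
        if (v == -1 && PyErr_Occurred()) {
            Py_DECREF(it);
            return -1;
        }
        *total += v;
    }
    Py_DECREF(it);
    return PyErr_Occurred() ? -1 : 0;
}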
3992
3993
static void
3994
listiter_dealloc(PyObject *self)
3995
33.3M
{
3996
33.3M
    _PyListIterObject *it = (_PyListIterObject *)self;
3997
33.3M
    _PyObject_GC_UNTRACK(it);
3998
33.3M
    Py_XDECREF(it->it_seq);
3999
33.3M
    assert(Py_IS_TYPE(self, &PyListIter_Type));
4000
33.3M
    _Py_FREELIST_FREE(list_iters, it, PyObject_GC_Del);
4001
33.3M
}
4002
4003
static int
4004
listiter_traverse(PyObject *it, visitproc visit, void *arg)
4005
678k
{
4006
678k
    Py_VISIT(((_PyListIterObject *)it)->it_seq);
4007
678k
    return 0;
4008
678k
}
4009
4010
static PyObject *
4011
listiter_next(PyObject *self)
4012
142M
{
4013
142M
    _PyListIterObject *it = (_PyListIterObject *)self;
4014
142M
    Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4015
142M
    if (index < 0) {
4016
174
        return NULL;
4017
174
    }
4018
4019
142M
    PyObject *item = list_get_item_ref(it->it_seq, index);
4020
142M
    if (item == NULL) {
4021
        // out-of-bounds
4022
32.8M
        FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, -1);
4023
32.8M
#ifndef Py_GIL_DISABLED
4024
32.8M
        PyListObject *seq = it->it_seq;
4025
32.8M
        it->it_seq = NULL;
4026
32.8M
        Py_DECREF(seq);
4027
32.8M
#endif
4028
32.8M
        return NULL;
4029
32.8M
    }
4030
109M
    FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index + 1);
4031
109M
    return item;
4032
142M
}
4033
4034
static PyObject *
4035
listiter_len(PyObject *self, PyObject *Py_UNUSED(ignored))
4036
1.58M
{
4037
1.58M
    assert(self != NULL);
4038
1.58M
    _PyListIterObject *it = (_PyListIterObject *)self;
4039
1.58M
    Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4040
1.58M
    if (index >= 0) {
4041
1.58M
        Py_ssize_t len = PyList_GET_SIZE(it->it_seq) - index;
4042
1.58M
        if (len >= 0)
4043
1.58M
            return PyLong_FromSsize_t(len);
4044
1.58M
    }
4045
0
    return PyLong_FromLong(0);
4046
1.58M
}
4047
4048
static PyObject *
4049
listiter_reduce(PyObject *it, PyObject *Py_UNUSED(ignored))
4050
0
{
4051
0
    return listiter_reduce_general(it, 1);
4052
0
}
4053
4054
static PyObject *
4055
listiter_setstate(PyObject *self, PyObject *state)
4056
0
{
4057
0
    _PyListIterObject *it = (_PyListIterObject *)self;
4058
0
    Py_ssize_t index = PyLong_AsSsize_t(state);
4059
0
    if (index == -1 && PyErr_Occurred())
4060
0
        return NULL;
4061
0
    if (it->it_seq != NULL) {
4062
0
        if (index < -1)
4063
0
            index = -1;
4064
0
        else if (index > PyList_GET_SIZE(it->it_seq))
4065
0
            index = PyList_GET_SIZE(it->it_seq); /* iterator exhausted */
4066
0
        FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index);
4067
0
    }
4068
0
    Py_RETURN_NONE;
4069
0
}
4070
4071
/*********************** List Reverse Iterator **************************/
4072
4073
typedef struct {
4074
    PyObject_HEAD
4075
    Py_ssize_t it_index;
4076
    PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
4077
} listreviterobject;
4078
4079
static void listreviter_dealloc(PyObject *);
4080
static int listreviter_traverse(PyObject *, visitproc, void *);
4081
static PyObject *listreviter_next(PyObject *);
4082
static PyObject *listreviter_len(PyObject *, PyObject *);
4083
static PyObject *listreviter_reduce(PyObject *, PyObject *);
4084
static PyObject *listreviter_setstate(PyObject *, PyObject *);
4085
4086
static PyMethodDef listreviter_methods[] = {
4087
    {"__length_hint__", listreviter_len, METH_NOARGS, length_hint_doc},
4088
    {"__reduce__", listreviter_reduce, METH_NOARGS, reduce_doc},
4089
    {"__setstate__", listreviter_setstate, METH_O, setstate_doc},
4090
    {NULL,              NULL}           /* sentinel */
4091
};
4092
4093
PyTypeObject PyListRevIter_Type = {
4094
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
4095
    "list_reverseiterator",                     /* tp_name */
4096
    sizeof(listreviterobject),                  /* tp_basicsize */
4097
    0,                                          /* tp_itemsize */
4098
    /* methods */
4099
    listreviter_dealloc,                        /* tp_dealloc */
4100
    0,                                          /* tp_vectorcall_offset */
4101
    0,                                          /* tp_getattr */
4102
    0,                                          /* tp_setattr */
4103
    0,                                          /* tp_as_async */
4104
    0,                                          /* tp_repr */
4105
    0,                                          /* tp_as_number */
4106
    0,                                          /* tp_as_sequence */
4107
    0,                                          /* tp_as_mapping */
4108
    0,                                          /* tp_hash */
4109
    0,                                          /* tp_call */
4110
    0,                                          /* tp_str */
4111
    PyObject_GenericGetAttr,                    /* tp_getattro */
4112
    0,                                          /* tp_setattro */
4113
    0,                                          /* tp_as_buffer */
4114
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
4115
    0,                                          /* tp_doc */
4116
    listreviter_traverse,                       /* tp_traverse */
4117
    0,                                          /* tp_clear */
4118
    0,                                          /* tp_richcompare */
4119
    0,                                          /* tp_weaklistoffset */
4120
    PyObject_SelfIter,                          /* tp_iter */
4121
    listreviter_next,                           /* tp_iternext */
4122
    listreviter_methods,                /* tp_methods */
4123
    0,
4124
};
4125
4126
/*[clinic input]
4127
list.__reversed__
4128
4129
Return a reverse iterator over the list.
4130
[clinic start generated code]*/
4131
4132
static PyObject *
4133
list___reversed___impl(PyListObject *self)
4134
/*[clinic end generated code: output=b166f073208c888c input=eadb6e17f8a6a280]*/
4135
36.4M
{
4136
36.4M
    listreviterobject *it;
4137
4138
36.4M
    it = PyObject_GC_New(listreviterobject, &PyListRevIter_Type);
4139
36.4M
    if (it == NULL)
4140
0
        return NULL;
4141
36.4M
    assert(PyList_Check(self));
4142
36.4M
    it->it_index = PyList_GET_SIZE(self) - 1;
4143
36.4M
    it->it_seq = (PyListObject*)Py_NewRef(self);
4144
36.4M
    PyObject_GC_Track(it);
4145
36.4M
    return (PyObject *)it;
4146
36.4M
}
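reversed(lst) resolves to this __reversed__ slot, so the iterator starts at index len(lst) - 1 and walks down to -1. From C, the same iterator can be obtained by calling the method (a hypothetical helper, shown only as a sketch):

/* Hypothetical helper: return the last element of a list by taking one step
 * of its reverse iterator; returns NULL (IndexError) for an empty list. */
static PyObject *
last_via_reversed(PyObject *lst)
{
    PyObject *rev = PyObject_CallMethod(lst, "__reversed__", NULL);
    if (rev == NULL) {
        return NULL;
    }
    PyObject *item = PyIter_Next(rev);
    Py_DECREF(rev);
    if (item == NULL && !PyErr_Occurred()) {
        PyErr_SetString(PyExc_IndexError, "list is empty");
    }
    return item;
}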
4147
4148
static void
4149
listreviter_dealloc(PyObject *self)
4150
36.4M
{
4151
36.4M
    listreviterobject *it = (listreviterobject *)self;
4152
36.4M
    PyObject_GC_UnTrack(it);
4153
36.4M
    Py_XDECREF(it->it_seq);
4154
36.4M
    PyObject_GC_Del(it);
4155
36.4M
}
4156
4157
static int
4158
listreviter_traverse(PyObject *it, visitproc visit, void *arg)
4159
537
{
4160
537
    Py_VISIT(((listreviterobject *)it)->it_seq);
4161
537
    return 0;
4162
537
}
4163
4164
static PyObject *
4165
listreviter_next(PyObject *self)
4166
46.1M
{
4167
46.1M
    listreviterobject *it = (listreviterobject *)self;
4168
46.1M
    assert(it != NULL);
4169
46.1M
    Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4170
46.1M
    if (index < 0) {
4171
25.7M
        return NULL;
4172
25.7M
    }
4173
4174
20.3M
    PyListObject *seq = it->it_seq;
4175
20.3M
    assert(PyList_Check(seq));
4176
20.3M
    PyObject *item = list_get_item_ref(seq, index);
4177
20.3M
    if (item != NULL) {
4178
20.3M
        FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index - 1);
4179
20.3M
        return item;
4180
20.3M
    }
4181
0
    FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, -1);
4182
0
#ifndef Py_GIL_DISABLED
4183
0
    it->it_seq = NULL;
4184
0
    Py_DECREF(seq);
4185
0
#endif
4186
0
    return NULL;
4187
20.3M
}
4188
4189
static PyObject *
4190
listreviter_len(PyObject *self, PyObject *Py_UNUSED(ignored))
4191
0
{
4192
0
    listreviterobject *it = (listreviterobject *)self;
4193
0
    Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4194
0
    Py_ssize_t len = index + 1;
4195
0
    if (it->it_seq == NULL || PyList_GET_SIZE(it->it_seq) < len)
4196
0
        len = 0;
4197
0
    return PyLong_FromSsize_t(len);
4198
0
}
4199
4200
static PyObject *
4201
listreviter_reduce(PyObject *it, PyObject *Py_UNUSED(ignored))
4202
0
{
4203
0
    return listiter_reduce_general(it, 0);
4204
0
}
4205
4206
static PyObject *
4207
listreviter_setstate(PyObject *self, PyObject *state)
4208
0
{
4209
0
    listreviterobject *it = (listreviterobject *)self;
4210
0
    Py_ssize_t index = PyLong_AsSsize_t(state);
4211
0
    if (index == -1 && PyErr_Occurred())
4212
0
        return NULL;
4213
0
    if (it->it_seq != NULL) {
4214
0
        if (index < -1)
4215
0
            index = -1;
4216
0
        else if (index > PyList_GET_SIZE(it->it_seq) - 1)
4217
0
            index = PyList_GET_SIZE(it->it_seq) - 1;
4218
0
        FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index);
4219
0
    }
4220
0
    Py_RETURN_NONE;
4221
0
}
4222
4223
/* common pickling support */
4224
4225
static PyObject *
4226
listiter_reduce_general(void *_it, int forward)
4227
0
{
4228
0
    PyObject *list;
4229
0
    PyObject *iter;
4230
4231
    /* _PyEval_GetBuiltin can invoke arbitrary code,
4232
     * so the call must happen before the iterator's pointers are read.
4233
     * see issue #101765 */
4234
4235
0
    if (forward) {
4236
0
        iter = _PyEval_GetBuiltin(&_Py_ID(iter));
4237
0
        _PyListIterObject *it = (_PyListIterObject *)_it;
4238
0
        Py_ssize_t idx = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4239
0
        if (idx >= 0) {
4240
0
            return Py_BuildValue("N(O)n", iter, it->it_seq, idx);
4241
0
        }
4242
0
    } else {
4243
0
        iter = _PyEval_GetBuiltin(&_Py_ID(reversed));
4244
0
        listreviterobject *it = (listreviterobject *)_it;
4245
0
        Py_ssize_t idx = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4246
0
        if (idx >= 0) {
4247
0
            return Py_BuildValue("N(O)n", iter, it->it_seq, idx);
4248
0
        }
4249
0
    }
4250
    /* empty iterator, create an empty list */
4251
0
    list = PyList_New(0);
4252
0
    if (list == NULL)
4253
0
        return NULL;
4254
0
    return Py_BuildValue("N(N)", iter, list);
4255
0
}
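For orientation, the Py_BuildValue() calls above encode the pickling contract: a live forward iterator reduces to (iter, (seq,), index) and a live reverse iterator to (reversed, (seq,), index), so unpickling re-creates the iterator over seq and __setstate__ then restores the saved index; an exhausted iterator instead reduces to (iter, ([],)) or (reversed, ([],)) via the final "N(N)" case. The comment block below only restates those calls, it is not new API:

/* Py_BuildValue("N(O)n", iter, it->it_seq, idx)
 *     -> (iter, (seq,), idx)   "N" steals iter, "O" increfs seq, "n" is Py_ssize_t
 * Py_BuildValue("N(N)", iter, list)
 *     -> (iter, ([],))         both references are stolen                        */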