Coverage Report

Created: 2026-04-20 06:54

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython3/Objects/listobject.c
Line
Count
Source
1
/* List object implementation */
2
3
#include "Python.h"
4
#include "pycore_abstract.h"      // _PyIndex_Check()
5
#include "pycore_ceval.h"         // _PyEval_GetBuiltin()
6
#include "pycore_critical_section.h"  // _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED()
7
#include "pycore_dict.h"          // _PyDictViewObject
8
#include "pycore_freelist.h"      // _Py_FREELIST_FREE(), _Py_FREELIST_POP()
9
#include "pycore_interp.h"        // PyInterpreterState.list
10
#include "pycore_list.h"          // struct _Py_list_freelist, _PyListIterObject
11
#include "pycore_long.h"          // _PyLong_DigitCount
12
#include "pycore_modsupport.h"    // _PyArg_NoKwnames()
13
#include "pycore_object.h"        // _PyObject_GC_TRACK(), _PyDebugAllocatorStats()
14
#include "pycore_pyatomic_ft_wrappers.h"
15
#include "pycore_setobject.h"     // _PySet_NextEntry()
16
#include "pycore_stackref.h"      // _Py_TryIncrefCompareStackRef()
17
#include "pycore_tuple.h"         // _PyTuple_FromArraySteal()
18
#include "pycore_typeobject.h"    // _Py_TYPE_VERSION_LIST
19
#include <stddef.h>
20
21
/*[clinic input]
22
class list "PyListObject *" "&PyList_Type"
23
[clinic start generated code]*/
24
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=f9b222678f9f71e0]*/
25
26
#include "clinic/listobject.c.h"
27
28
_Py_DECLARE_STR(list_err, "list index out of range");
29
30
#ifdef Py_GIL_DISABLED
31
typedef struct {
32
    Py_ssize_t allocated;
33
    PyObject *ob_item[];
34
} _PyListArray;
35
36
static _PyListArray *
37
list_allocate_array(size_t capacity)
38
{
39
    if (capacity > PY_SSIZE_T_MAX/sizeof(PyObject*) - 1) {
40
        return NULL;
41
    }
42
    _PyListArray *array = PyMem_Malloc(sizeof(_PyListArray) + capacity * sizeof(PyObject *));
43
    if (array == NULL) {
44
        return NULL;
45
    }
46
    array->allocated = capacity;
47
    return array;
48
}
49
50
static Py_ssize_t
51
list_capacity(PyObject **items)
52
{
53
    _PyListArray *array = _Py_CONTAINER_OF(items, _PyListArray, ob_item);
54
    return array->allocated;
55
}
56
#endif
57
58
static void
59
free_list_items(PyObject** items, bool use_qsbr)
60
16.2M
{
61
#ifdef Py_GIL_DISABLED
62
    _PyListArray *array = _Py_CONTAINER_OF(items, _PyListArray, ob_item);
63
    if (use_qsbr) {
64
        size_t size = sizeof(_PyListArray) + array->allocated * sizeof(PyObject *);
65
        _PyMem_FreeDelayed(array, size);
66
    }
67
    else {
68
        PyMem_Free(array);
69
    }
70
#else
71
16.2M
    PyMem_Free(items);
72
16.2M
#endif
73
16.2M
}
74
75
static void
76
ensure_shared_on_resize(PyListObject *self)
77
16.1M
{
78
#ifdef Py_GIL_DISABLED
79
    // We can't use _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED here because
80
    // the `CALL_LIST_APPEND` bytecode handler may lock the list without
81
    // a critical section.
82
    assert(Py_REFCNT(self) == 1 || PyMutex_IsLocked(&_PyObject_CAST(self)->ob_mutex));
83
84
    // Ensure that the list array is freed using QSBR if we are not the
85
    // owning thread.
86
    if (!_Py_IsOwnedByCurrentThread((PyObject *)self) &&
87
        !_PyObject_GC_IS_SHARED(self))
88
    {
89
        _PyObject_GC_SET_SHARED(self);
90
    }
91
#endif
92
16.1M
}
93
94
/* Ensure ob_item has room for at least newsize elements, and set
95
 * ob_size to newsize.  If newsize > ob_size on entry, the content
96
 * of the new slots at exit is undefined heap trash; it's the caller's
97
 * responsibility to overwrite them with sane values.
98
 * The number of allocated elements may grow, shrink, or stay the same.
99
 * Failure is impossible if newsize <= self.allocated on entry.
100
 * Note that self->ob_item may change, and even if newsize is less
101
 * than ob_size on entry.
102
 */
103
static int
104
list_resize(PyListObject *self, Py_ssize_t newsize)
105
16.5M
{
106
16.5M
    size_t new_allocated, target_bytes;
107
16.5M
    Py_ssize_t allocated = self->allocated;
108
109
    /* Bypass realloc() when a previous overallocation is large enough
110
       to accommodate the newsize.  If the newsize falls lower than half
111
       the allocated size, then proceed with the realloc() to shrink the list.
112
    */
113
16.5M
    if (allocated >= newsize && newsize >= (allocated >> 1)) {
114
373k
        assert(self->ob_item != NULL || newsize == 0);
115
373k
        Py_SET_SIZE(self, newsize);
116
373k
        return 0;
117
373k
    }
118
119
    /* This over-allocates proportional to the list size, making room
120
     * for additional growth.  The over-allocation is mild, but is
121
     * enough to give linear-time amortized behavior over a long
122
     * sequence of appends() in the presence of a poorly-performing
123
     * system realloc().
124
     * Add padding to make the allocated size multiple of 4.
125
     * The growth pattern is:  0, 4, 8, 16, 24, 32, 40, 52, 64, 76, ...
126
     * Note: new_allocated won't overflow because the largest possible value
127
     *       is PY_SSIZE_T_MAX * (9 / 8) + 6 which always fits in a size_t.
128
     */
129
16.1M
    new_allocated = ((size_t)newsize + (newsize >> 3) + 6) & ~(size_t)3;
130
    /* Do not overallocate if the new size is closer to overallocated size
131
     * than to the old size.
132
     */
133
16.1M
    if (newsize - Py_SIZE(self) > (Py_ssize_t)(new_allocated - newsize))
134
35.4k
        new_allocated = ((size_t)newsize + 3) & ~(size_t)3;
135
136
16.1M
    if (newsize == 0)
137
0
        new_allocated = 0;
138
139
16.1M
    ensure_shared_on_resize(self);
140
141
#ifdef Py_GIL_DISABLED
142
    _PyListArray *array = list_allocate_array(new_allocated);
143
    if (array == NULL) {
144
        if (newsize < allocated) {
145
            // Never fail when shrinking allocations
146
            Py_SET_SIZE(self, newsize);
147
            return 0;
148
        }
149
        PyErr_NoMemory();
150
        return -1;
151
    }
152
    PyObject **old_items = self->ob_item;
153
    if (self->ob_item) {
154
        if (new_allocated < (size_t)allocated) {
155
            target_bytes = new_allocated * sizeof(PyObject*);
156
        }
157
        else {
158
            target_bytes = allocated * sizeof(PyObject*);
159
        }
160
        memcpy(array->ob_item, self->ob_item, target_bytes);
161
    }
162
    if (new_allocated > (size_t)allocated) {
163
        memset(array->ob_item + allocated, 0, sizeof(PyObject *) * (new_allocated - allocated));
164
    }
165
     _Py_atomic_store_ptr_release(&self->ob_item, &array->ob_item);
166
    self->allocated = new_allocated;
167
    Py_SET_SIZE(self, newsize);
168
    if (old_items != NULL) {
169
        free_list_items(old_items, _PyObject_GC_IS_SHARED(self));
170
    }
171
#else
172
16.1M
    PyObject **items;
173
16.1M
    if (new_allocated <= (size_t)PY_SSIZE_T_MAX / sizeof(PyObject *)) {
174
16.1M
        target_bytes = new_allocated * sizeof(PyObject *);
175
16.1M
        items = (PyObject **)PyMem_Realloc(self->ob_item, target_bytes);
176
16.1M
    }
177
0
    else {
178
        // integer overflow
179
0
        items = NULL;
180
0
    }
181
16.1M
    if (items == NULL) {
182
0
        if (newsize < allocated) {
183
            // Never fail when shrinking allocations
184
0
            Py_SET_SIZE(self, newsize);
185
0
            return 0;
186
0
        }
187
0
        PyErr_NoMemory();
188
0
        return -1;
189
0
    }
190
16.1M
    self->ob_item = items;
191
16.1M
    Py_SET_SIZE(self, newsize);
192
16.1M
    self->allocated = new_allocated;
193
16.1M
#endif
194
16.1M
    return 0;
195
16.1M
}
196
197
static int
198
list_preallocate_exact(PyListObject *self, Py_ssize_t size)
199
627k
{
200
627k
    PyObject **items;
201
627k
    assert(self->ob_item == NULL);
202
627k
    assert(size > 0);
203
204
    /* Since the Python memory allocator has granularity of 16 bytes on 64-bit
205
     * platforms (8 on 32-bit), there is no benefit of allocating space for
206
     * the odd number of items, and there is no drawback of rounding the
207
     * allocated size up to the nearest even number.
208
     */
209
627k
    size = (size + 1) & ~(size_t)1;
210
#ifdef Py_GIL_DISABLED
211
    _PyListArray *array = list_allocate_array(size);
212
    if (array == NULL) {
213
        PyErr_NoMemory();
214
        return -1;
215
    }
216
    items = array->ob_item;
217
    memset(items, 0, size * sizeof(PyObject *));
218
#else
219
627k
    items = PyMem_New(PyObject*, size);
220
627k
    if (items == NULL) {
221
0
        PyErr_NoMemory();
222
0
        return -1;
223
0
    }
224
627k
#endif
225
627k
    FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item, items);
226
627k
    self->allocated = size;
227
627k
    return 0;
228
627k
}
229
230
/* Print summary info about the state of the optimized allocator */
231
void
232
_PyList_DebugMallocStats(FILE *out)
233
0
{
234
0
    _PyDebugAllocatorStats(out,
235
0
                           "free PyListObject",
236
0
                            _Py_FREELIST_SIZE(lists),
237
0
                           sizeof(PyListObject));
238
0
}
239
240
PyObject *
241
PyList_New(Py_ssize_t size)
242
58.9M
{
243
58.9M
    if (size < 0) {
244
0
        PyErr_BadInternalCall();
245
0
        return NULL;
246
0
    }
247
248
58.9M
    PyListObject *op = _Py_FREELIST_POP(PyListObject, lists);
249
58.9M
    if (op == NULL) {
250
24.3M
        op = PyObject_GC_New(PyListObject, &PyList_Type);
251
24.3M
        if (op == NULL) {
252
0
            return NULL;
253
0
        }
254
24.3M
    }
255
58.9M
    if (size <= 0) {
256
57.5M
        op->ob_item = NULL;
257
57.5M
    }
258
1.37M
    else {
259
#ifdef Py_GIL_DISABLED
260
        _PyListArray *array = list_allocate_array(size);
261
        if (array == NULL) {
262
            Py_DECREF(op);
263
            return PyErr_NoMemory();
264
        }
265
        memset(&array->ob_item, 0, size * sizeof(PyObject *));
266
        op->ob_item = array->ob_item;
267
#else
268
1.37M
        op->ob_item = (PyObject **) PyMem_Calloc(size, sizeof(PyObject *));
269
1.37M
#endif
270
1.37M
        if (op->ob_item == NULL) {
271
0
            Py_DECREF(op);
272
0
            return PyErr_NoMemory();
273
0
        }
274
1.37M
    }
275
58.9M
    Py_SET_SIZE(op, size);
276
58.9M
    op->allocated = size;
277
58.9M
    _PyObject_GC_TRACK(op);
278
58.9M
    return (PyObject *) op;
279
58.9M
}
280
281
static PyObject *
282
list_new_prealloc(Py_ssize_t size)
283
1.69M
{
284
1.69M
    assert(size > 0);
285
1.69M
    PyListObject *op = (PyListObject *) PyList_New(0);
286
1.69M
    if (op == NULL) {
287
0
        return NULL;
288
0
    }
289
1.69M
    assert(op->ob_item == NULL);
290
#ifdef Py_GIL_DISABLED
291
    _PyListArray *array = list_allocate_array(size);
292
    if (array == NULL) {
293
        Py_DECREF(op);
294
        return PyErr_NoMemory();
295
    }
296
    op->ob_item = array->ob_item;
297
#else
298
1.69M
    op->ob_item = PyMem_New(PyObject *, size);
299
1.69M
    if (op->ob_item == NULL) {
300
0
        Py_DECREF(op);
301
0
        return PyErr_NoMemory();
302
0
    }
303
1.69M
#endif
304
1.69M
    op->allocated = size;
305
1.69M
    return (PyObject *) op;
306
1.69M
}
307
308
Py_ssize_t
309
PyList_Size(PyObject *op)
310
25.1k
{
311
25.1k
    if (!PyList_Check(op)) {
312
0
        PyErr_BadInternalCall();
313
0
        return -1;
314
0
    }
315
25.1k
    else {
316
25.1k
        return PyList_GET_SIZE(op);
317
25.1k
    }
318
25.1k
}
319
320
static inline int
321
valid_index(Py_ssize_t i, Py_ssize_t limit)
322
42.0M
{
323
    /* The cast to size_t lets us use just a single comparison
324
       to check whether i is in the range: 0 <= i < limit.
325
326
       See:  Section 14.2 "Bounds Checking" in the Agner Fog
327
       optimization manual found at:
328
       https://www.agner.org/optimize/optimizing_cpp.pdf
329
    */
330
42.0M
    return (size_t) i < (size_t) limit;
331
42.0M
}
332
333
#ifdef Py_GIL_DISABLED
334
335
static PyObject *
336
list_item_impl(PyListObject *self, Py_ssize_t idx)
337
{
338
    PyObject *item = NULL;
339
    Py_BEGIN_CRITICAL_SECTION(self);
340
    if (!_PyObject_GC_IS_SHARED(self)) {
341
        _PyObject_GC_SET_SHARED(self);
342
    }
343
    Py_ssize_t size = Py_SIZE(self);
344
    if (!valid_index(idx, size)) {
345
        goto exit;
346
    }
347
    item = _Py_NewRefWithLock(self->ob_item[idx]);
348
exit:
349
    Py_END_CRITICAL_SECTION();
350
    return item;
351
}
352
353
static inline PyObject*
354
list_get_item_ref(PyListObject *op, Py_ssize_t i)
355
{
356
    if (!_Py_IsOwnedByCurrentThread((PyObject *)op) && !_PyObject_GC_IS_SHARED(op)) {
357
        return list_item_impl(op, i);
358
    }
359
    // Need atomic operation for the getting size.
360
    Py_ssize_t size = PyList_GET_SIZE(op);
361
    if (!valid_index(i, size)) {
362
        return NULL;
363
    }
364
    PyObject **ob_item = _Py_atomic_load_ptr(&op->ob_item);
365
    if (ob_item == NULL) {
366
        return NULL;
367
    }
368
    Py_ssize_t cap = list_capacity(ob_item);
369
    assert(cap != -1);
370
    if (!valid_index(i, cap)) {
371
        return NULL;
372
    }
373
    PyObject *item = _Py_TryXGetRef(&ob_item[i]);
374
    if (item == NULL) {
375
        return list_item_impl(op, i);
376
    }
377
    return item;
378
}
379
#else
380
static inline PyObject*
381
list_get_item_ref(PyListObject *op, Py_ssize_t i)
382
33.5M
{
383
33.5M
    if (!valid_index(i, Py_SIZE(op))) {
384
292k
        return NULL;
385
292k
    }
386
33.2M
    return Py_NewRef(PyList_GET_ITEM(op, i));
387
33.2M
}
388
#endif
389
390
PyObject *
391
PyList_GetItem(PyObject *op, Py_ssize_t i)
392
986
{
393
986
    if (!PyList_Check(op)) {
394
0
        PyErr_BadInternalCall();
395
0
        return NULL;
396
0
    }
397
986
    if (!valid_index(i, Py_SIZE(op))) {
398
0
        _Py_DECLARE_STR(list_err, "list index out of range");
399
0
        PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
400
0
        return NULL;
401
0
    }
402
986
    return ((PyListObject *)op) -> ob_item[i];
403
986
}
404
405
PyObject *
406
PyList_GetItemRef(PyObject *op, Py_ssize_t i)
407
764
{
408
764
    if (!PyList_Check(op)) {
409
0
        PyErr_SetString(PyExc_TypeError, "expected a list");
410
0
        return NULL;
411
0
    }
412
764
    PyObject *item = list_get_item_ref((PyListObject *)op, i);
413
764
    if (item == NULL) {
414
0
        _Py_DECLARE_STR(list_err, "list index out of range");
415
0
        PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
416
0
        return NULL;
417
0
    }
418
764
    return item;
419
764
}
420
421
PyObject *
422
_PyList_GetItemRef(PyListObject *list, Py_ssize_t i)
423
0
{
424
0
    return list_get_item_ref(list, i);
425
0
}
426
427
#ifdef Py_GIL_DISABLED
428
int
429
_PyList_GetItemRefNoLock(PyListObject *list, Py_ssize_t i, _PyStackRef *result)
430
{
431
    assert(_Py_IsOwnedByCurrentThread((PyObject *)list) ||
432
           _PyObject_GC_IS_SHARED(list));
433
    if (!valid_index(i, PyList_GET_SIZE(list))) {
434
        return 0;
435
    }
436
    PyObject **ob_item = _Py_atomic_load_ptr(&list->ob_item);
437
    if (ob_item == NULL) {
438
        return 0;
439
    }
440
    Py_ssize_t cap = list_capacity(ob_item);
441
    assert(cap != -1);
442
    if (!valid_index(i, cap)) {
443
        return 0;
444
    }
445
    PyObject *obj = _Py_atomic_load_ptr(&ob_item[i]);
446
    if (obj == NULL || !_Py_TryIncrefCompareStackRef(&ob_item[i], obj, result)) {
447
        return -1;
448
    }
449
    return 1;
450
}
451
#endif
452
453
int
454
PyList_SetItem(PyObject *op, Py_ssize_t i,
455
               PyObject *newitem)
456
4.70M
{
457
4.70M
    if (!PyList_Check(op)) {
458
0
        Py_XDECREF(newitem);
459
0
        PyErr_BadInternalCall();
460
0
        return -1;
461
0
    }
462
4.70M
    int ret;
463
4.70M
    PyListObject *self = ((PyListObject *)op);
464
4.70M
    Py_BEGIN_CRITICAL_SECTION(self);
465
4.70M
    if (!valid_index(i, Py_SIZE(self))) {
466
0
        Py_XDECREF(newitem);
467
0
        PyErr_SetString(PyExc_IndexError,
468
0
                        "list assignment index out of range");
469
0
        ret = -1;
470
0
        goto end;
471
0
    }
472
4.70M
    PyObject *tmp = self->ob_item[i];
473
4.70M
    FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item[i], newitem);
474
4.70M
    Py_XDECREF(tmp);
475
4.70M
    ret = 0;
476
4.70M
end:;
477
4.70M
    Py_END_CRITICAL_SECTION();
478
4.70M
    return ret;
479
4.70M
}
480
481
static int
482
ins1(PyListObject *self, Py_ssize_t where, PyObject *v)
483
7.82k
{
484
7.82k
    Py_ssize_t i, n = Py_SIZE(self);
485
7.82k
    PyObject **items;
486
7.82k
    if (v == NULL) {
487
0
        PyErr_BadInternalCall();
488
0
        return -1;
489
0
    }
490
491
7.82k
    assert((size_t)n + 1 < PY_SSIZE_T_MAX);
492
7.82k
    if (list_resize(self, n+1) < 0)
493
0
        return -1;
494
495
7.82k
    if (where < 0) {
496
0
        where += n;
497
0
        if (where < 0)
498
0
            where = 0;
499
0
    }
500
7.82k
    if (where > n)
501
0
        where = n;
502
7.82k
    items = self->ob_item;
503
41.3k
    for (i = n; --i >= where; )
504
33.5k
        FT_ATOMIC_STORE_PTR_RELEASE(items[i+1], items[i]);
505
7.82k
    FT_ATOMIC_STORE_PTR_RELEASE(items[where], Py_NewRef(v));
506
7.82k
    return 0;
507
7.82k
}
508
509
int
510
PyList_Insert(PyObject *op, Py_ssize_t where, PyObject *newitem)
511
22
{
512
22
    if (!PyList_Check(op)) {
513
0
        PyErr_BadInternalCall();
514
0
        return -1;
515
0
    }
516
22
    PyListObject *self = (PyListObject *)op;
517
22
    int err;
518
22
    Py_BEGIN_CRITICAL_SECTION(self);
519
22
    err = ins1(self, where, newitem);
520
22
    Py_END_CRITICAL_SECTION();
521
22
    return err;
522
22
}
523
524
/* internal, used by _PyList_AppendTakeRef */
525
int
526
_PyList_AppendTakeRefListResize(PyListObject *self, PyObject *newitem)
527
15.6M
{
528
15.6M
    Py_ssize_t len = Py_SIZE(self);
529
15.6M
    assert(self->allocated == -1 || self->allocated == len);
530
15.6M
    if (list_resize(self, len + 1) < 0) {
531
0
        Py_DECREF(newitem);
532
0
        return -1;
533
0
    }
534
15.6M
    FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item[len], newitem);
535
15.6M
    return 0;
536
15.6M
}
537
538
int
539
PyList_Append(PyObject *op, PyObject *newitem)
540
97.7M
{
541
97.7M
    if (PyList_Check(op) && (newitem != NULL)) {
542
97.7M
        int ret;
543
97.7M
        Py_BEGIN_CRITICAL_SECTION(op);
544
97.7M
        ret = _PyList_AppendTakeRef((PyListObject *)op, Py_NewRef(newitem));
545
97.7M
        Py_END_CRITICAL_SECTION();
546
97.7M
        return ret;
547
97.7M
    }
548
0
    PyErr_BadInternalCall();
549
0
    return -1;
550
97.7M
}
551
552
/* Methods */
553
554
static void
555
list_dealloc(PyObject *self)
556
59.2M
{
557
59.2M
    PyListObject *op = (PyListObject *)self;
558
59.2M
    Py_ssize_t i;
559
59.2M
    PyObject_GC_UnTrack(op);
560
59.2M
    if (op->ob_item != NULL) {
561
        /* Do it backwards, for Christian Tismer.
562
           There's a simple test case where somehow this reduces
563
           thrashing when a *very* large list is created and
564
           immediately deleted. */
565
16.2M
        i = Py_SIZE(op);
566
301M
        while (--i >= 0) {
567
285M
            Py_XDECREF(op->ob_item[i]);
568
285M
        }
569
16.2M
        free_list_items(op->ob_item, false);
570
16.2M
        op->ob_item = NULL;
571
16.2M
    }
572
59.2M
    if (PyList_CheckExact(op)) {
573
59.2M
        _Py_FREELIST_FREE(lists, op, PyObject_GC_Del);
574
59.2M
    }
575
1.44k
    else {
576
1.44k
        PyObject_GC_Del(op);
577
1.44k
    }
578
59.2M
}
579
580
static PyObject *
581
list_repr_impl(PyListObject *v)
582
0
{
583
0
    int res = Py_ReprEnter((PyObject*)v);
584
0
    if (res != 0) {
585
0
        return (res > 0 ? PyUnicode_FromString("[...]") : NULL);
586
0
    }
587
588
    /* "[" + "1" + ", 2" * (len - 1) + "]" */
589
0
    Py_ssize_t prealloc = 1 + 1 + (2 + 1) * (Py_SIZE(v) - 1) + 1;
590
0
    PyUnicodeWriter *writer = PyUnicodeWriter_Create(prealloc);
591
0
    PyObject *item = NULL;
592
0
    if (writer == NULL) {
593
0
        goto error;
594
0
    }
595
596
0
    if (PyUnicodeWriter_WriteChar(writer, '[') < 0) {
597
0
        goto error;
598
0
    }
599
600
    /* Do repr() on each element.  Note that this may mutate the list,
601
       so must refetch the list size on each iteration. */
602
0
    for (Py_ssize_t i = 0; i < Py_SIZE(v); ++i) {
603
        /* Hold a strong reference since repr(item) can mutate the list */
604
0
        item = Py_XNewRef(v->ob_item[i]);
605
606
0
        if (i > 0) {
607
0
            if (PyUnicodeWriter_WriteChar(writer, ',') < 0) {
608
0
                goto error;
609
0
            }
610
0
            if (PyUnicodeWriter_WriteChar(writer, ' ') < 0) {
611
0
                goto error;
612
0
            }
613
0
        }
614
615
0
        if (PyUnicodeWriter_WriteRepr(writer, item) < 0) {
616
0
            goto error;
617
0
        }
618
0
        Py_CLEAR(item);
619
0
    }
620
621
0
    if (PyUnicodeWriter_WriteChar(writer, ']') < 0) {
622
0
        goto error;
623
0
    }
624
625
0
    Py_ReprLeave((PyObject *)v);
626
0
    return PyUnicodeWriter_Finish(writer);
627
628
0
error:
629
0
    Py_XDECREF(item);
630
0
    PyUnicodeWriter_Discard(writer);
631
0
    Py_ReprLeave((PyObject *)v);
632
0
    return NULL;
633
0
}
634
635
static PyObject *
636
list_repr(PyObject *self)
637
0
{
638
0
    if (PyList_GET_SIZE(self) == 0) {
639
0
        return PyUnicode_FromString("[]");
640
0
    }
641
0
    PyListObject *v = (PyListObject *)self;
642
0
    PyObject *ret = NULL;
643
0
    Py_BEGIN_CRITICAL_SECTION(v);
644
0
    ret = list_repr_impl(v);
645
0
    Py_END_CRITICAL_SECTION();
646
0
    return ret;
647
0
}
648
649
static Py_ssize_t
650
list_length(PyObject *a)
651
18.9M
{
652
18.9M
    return PyList_GET_SIZE(a);
653
18.9M
}
654
655
static int
656
list_contains(PyObject *aa, PyObject *el)
657
3.07k
{
658
659
21.8k
    for (Py_ssize_t i = 0; ; i++) {
660
21.8k
        PyObject *item = list_get_item_ref((PyListObject *)aa, i);
661
21.8k
        if (item == NULL) {
662
            // out-of-bounds
663
1.40k
            return 0;
664
1.40k
        }
665
20.4k
        int cmp = PyObject_RichCompareBool(item, el, Py_EQ);
666
20.4k
        Py_DECREF(item);
667
20.4k
        if (cmp != 0) {
668
1.66k
            return cmp;
669
1.66k
        }
670
20.4k
    }
671
0
    return 0;
672
3.07k
}
673
674
static PyObject *
675
list_item(PyObject *aa, Py_ssize_t i)
676
2.14M
{
677
2.14M
    PyListObject *a = (PyListObject *)aa;
678
2.14M
    if (!valid_index(i, PyList_GET_SIZE(a))) {
679
2.12M
        PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
680
2.12M
        return NULL;
681
2.12M
    }
682
14.9k
    PyObject *item;
683
#ifdef Py_GIL_DISABLED
684
    item = list_get_item_ref(a, i);
685
    if (item == NULL) {
686
        PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
687
        return NULL;
688
    }
689
#else
690
14.9k
    item = Py_NewRef(a->ob_item[i]);
691
14.9k
#endif
692
14.9k
    return item;
693
2.14M
}
694
695
static PyObject *
696
list_slice_lock_held(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
697
1.69M
{
698
1.69M
    PyListObject *np;
699
1.69M
    PyObject **src, **dest;
700
1.69M
    Py_ssize_t i, len;
701
1.69M
    len = ihigh - ilow;
702
1.69M
    if (len <= 0) {
703
95
        return PyList_New(0);
704
95
    }
705
1.69M
    np = (PyListObject *) list_new_prealloc(len);
706
1.69M
    if (np == NULL)
707
0
        return NULL;
708
709
1.69M
    src = a->ob_item + ilow;
710
1.69M
    dest = np->ob_item;
711
3.42M
    for (i = 0; i < len; i++) {
712
1.73M
        PyObject *v = src[i];
713
1.73M
        dest[i] = Py_NewRef(v);
714
1.73M
    }
715
1.69M
    Py_SET_SIZE(np, len);
716
1.69M
    return (PyObject *)np;
717
1.69M
}
718
719
PyObject *
720
_PyList_BinarySlice(PyObject *container, PyObject *start, PyObject *stop)
721
1.15k
{
722
1.15k
    assert(PyList_CheckExact(container));
723
1.15k
    Py_ssize_t istart = 0;
724
1.15k
    Py_ssize_t istop = PY_SSIZE_T_MAX;
725
    /* Unpack the index values before acquiring the lock, since
726
     * _PyEval_SliceIndex may call __index__ which could execute
727
     * arbitrary Python code. */
728
1.15k
    if (!_PyEval_SliceIndex(start, &istart)) {
729
0
        return NULL;
730
0
    }
731
1.15k
    if (!_PyEval_SliceIndex(stop, &istop)) {
732
0
        return NULL;
733
0
    }
734
1.15k
    PyObject *ret;
735
1.15k
    Py_BEGIN_CRITICAL_SECTION(container);
736
1.15k
    Py_ssize_t len = Py_SIZE(container);
737
1.15k
    PySlice_AdjustIndices(len, &istart, &istop, 1);
738
1.15k
    ret = list_slice_lock_held((PyListObject *)container, istart, istop);
739
1.15k
    Py_END_CRITICAL_SECTION();
740
1.15k
    return ret;
741
1.15k
}
742
743
PyObject *
744
PyList_GetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
745
0
{
746
0
    if (!PyList_Check(a)) {
747
0
        PyErr_BadInternalCall();
748
0
        return NULL;
749
0
    }
750
0
    PyObject *ret;
751
0
    Py_BEGIN_CRITICAL_SECTION(a);
752
0
    if (ilow < 0) {
753
0
        ilow = 0;
754
0
    }
755
0
    else if (ilow > Py_SIZE(a)) {
756
0
        ilow = Py_SIZE(a);
757
0
    }
758
0
    if (ihigh < ilow) {
759
0
        ihigh = ilow;
760
0
    }
761
0
    else if (ihigh > Py_SIZE(a)) {
762
0
        ihigh = Py_SIZE(a);
763
0
    }
764
0
    ret = list_slice_lock_held((PyListObject *)a, ilow, ihigh);
765
0
    Py_END_CRITICAL_SECTION();
766
0
    return ret;
767
0
}
768
769
static PyObject *
770
list_concat_lock_held(PyListObject *a, PyListObject *b)
771
87
{
772
87
    Py_ssize_t size;
773
87
    Py_ssize_t i;
774
87
    PyObject **src, **dest;
775
87
    PyListObject *np;
776
87
    assert((size_t)Py_SIZE(a) + (size_t)Py_SIZE(b) < PY_SSIZE_T_MAX);
777
87
    size = Py_SIZE(a) + Py_SIZE(b);
778
87
    if (size == 0) {
779
0
        return PyList_New(0);
780
0
    }
781
87
    np = (PyListObject *) list_new_prealloc(size);
782
87
    if (np == NULL) {
783
0
        return NULL;
784
0
    }
785
87
    src = a->ob_item;
786
87
    dest = np->ob_item;
787
780
    for (i = 0; i < Py_SIZE(a); i++) {
788
693
        PyObject *v = src[i];
789
693
        dest[i] = Py_NewRef(v);
790
693
    }
791
87
    src = b->ob_item;
792
87
    dest = np->ob_item + Py_SIZE(a);
793
638
    for (i = 0; i < Py_SIZE(b); i++) {
794
551
        PyObject *v = src[i];
795
551
        dest[i] = Py_NewRef(v);
796
551
    }
797
87
    Py_SET_SIZE(np, size);
798
87
    return (PyObject *)np;
799
87
}
800
801
PyObject *
802
_PyList_Concat(PyObject *aa, PyObject *bb)
803
87
{
804
87
    if (!PyList_Check(bb)) {
805
0
        PyErr_Format(PyExc_TypeError,
806
0
                  "can only concatenate list (not \"%.200s\") to list",
807
0
                  Py_TYPE(bb)->tp_name);
808
0
        return NULL;
809
0
    }
810
87
    PyListObject *a = (PyListObject *)aa;
811
87
    PyListObject *b = (PyListObject *)bb;
812
87
    PyObject *ret;
813
87
    Py_BEGIN_CRITICAL_SECTION2(a, b);
814
87
    ret = list_concat_lock_held(a, b);
815
87
    Py_END_CRITICAL_SECTION2();
816
87
    return ret;
817
87
}
818
819
static PyObject *
820
list_repeat_lock_held(PyListObject *a, Py_ssize_t n)
821
2.83k
{
822
2.83k
    const Py_ssize_t input_size = Py_SIZE(a);
823
2.83k
    if (input_size == 0 || n <= 0)
824
0
        return PyList_New(0);
825
2.83k
    assert(n > 0);
826
827
2.83k
    if (input_size > PY_SSIZE_T_MAX / n)
828
0
        return PyErr_NoMemory();
829
2.83k
    Py_ssize_t output_size = input_size * n;
830
831
2.83k
    PyListObject *np = (PyListObject *) list_new_prealloc(output_size);
832
2.83k
    if (np == NULL)
833
0
        return NULL;
834
835
2.83k
    PyObject **dest = np->ob_item;
836
2.83k
    if (input_size == 1) {
837
2.83k
        PyObject *elem = a->ob_item[0];
838
2.83k
        _Py_RefcntAdd(elem, n);
839
2.83k
        PyObject **dest_end = dest + output_size;
840
1.55M
        while (dest < dest_end) {
841
1.55M
            *dest++ = elem;
842
1.55M
        }
843
2.83k
    }
844
0
    else {
845
0
        PyObject **src = a->ob_item;
846
0
        PyObject **src_end = src + input_size;
847
0
        while (src < src_end) {
848
0
            _Py_RefcntAdd(*src, n);
849
0
            *dest++ = *src++;
850
0
        }
851
        // This list is not yet visible to other threads, so atomic repeat
852
        // is not necessary even in Py_GIL_DISABLED builds.
853
0
        _Py_memory_repeat((char *)np->ob_item, sizeof(PyObject *)*output_size,
854
0
                                        sizeof(PyObject *)*input_size);
855
0
    }
856
857
2.83k
    Py_SET_SIZE(np, output_size);
858
2.83k
    return (PyObject *) np;
859
2.83k
}
860
861
static PyObject *
862
list_repeat(PyObject *aa, Py_ssize_t n)
863
2.83k
{
864
2.83k
    PyObject *ret;
865
2.83k
    PyListObject *a = (PyListObject *)aa;
866
2.83k
    Py_BEGIN_CRITICAL_SECTION(a);
867
2.83k
    ret = list_repeat_lock_held(a, n);
868
2.83k
    Py_END_CRITICAL_SECTION();
869
2.83k
    return ret;
870
2.83k
}
871
872
static void
873
list_clear_impl(PyListObject *a, bool is_resize)
874
8.11k
{
875
8.11k
    PyObject **items = a->ob_item;
876
8.11k
    if (items == NULL) {
877
0
        return;
878
0
    }
879
880
    /* Because XDECREF can recursively invoke operations on
881
       this list, we make it empty first. */
882
8.11k
    Py_ssize_t i = Py_SIZE(a);
883
8.11k
    Py_SET_SIZE(a, 0);
884
8.11k
    FT_ATOMIC_STORE_PTR_RELEASE(a->ob_item, NULL);
885
8.11k
    a->allocated = 0;
886
16.2k
    while (--i >= 0) {
887
8.11k
        Py_XDECREF(items[i]);
888
8.11k
    }
889
#ifdef Py_GIL_DISABLED
890
    if (is_resize) {
891
        ensure_shared_on_resize(a);
892
    }
893
    bool use_qsbr = is_resize && _PyObject_GC_IS_SHARED(a);
894
#else
895
8.11k
    bool use_qsbr = false;
896
8.11k
#endif
897
8.11k
    free_list_items(items, use_qsbr);
898
    // Note that there is no guarantee that the list is actually empty
899
    // at this point, because XDECREF may have populated it indirectly again!
900
8.11k
}
901
902
static void
903
list_clear(PyListObject *a)
904
8.11k
{
905
8.11k
    list_clear_impl(a, true);
906
8.11k
}
907
908
static int
909
list_clear_slot(PyObject *self)
910
0
{
911
0
    list_clear_impl((PyListObject *)self, false);
912
0
    return 0;
913
0
}
914
915
// Pointer-by-pointer memmove for PyObject** arrays that is safe
916
// for shared lists in Py_GIL_DISABLED builds.
917
static void
918
ptr_wise_atomic_memmove(PyListObject *a, PyObject **dest, PyObject **src, Py_ssize_t n)
919
437k
{
920
437k
#ifndef Py_GIL_DISABLED
921
437k
    memmove(dest, src, n * sizeof(PyObject *));
922
#else
923
    _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(a);
924
    if (_Py_IsOwnedByCurrentThread((PyObject *)a) && !_PyObject_GC_IS_SHARED(a)) {
925
        // No other threads can read this list concurrently
926
        memmove(dest, src, n * sizeof(PyObject *));
927
        return;
928
    }
929
    if (dest < src) {
930
        for (Py_ssize_t i = 0; i != n; i++) {
931
            _Py_atomic_store_ptr_release(&dest[i], src[i]);
932
        }
933
    }
934
    else {
935
        // copy backwards to avoid overwriting src before it's read
936
        for (Py_ssize_t i = n; i != 0; i--) {
937
            _Py_atomic_store_ptr_release(&dest[i - 1], src[i - 1]);
938
        }
939
    }
940
#endif
941
437k
}
942
943
/* a[ilow:ihigh] = v if v != NULL.
944
 * del a[ilow:ihigh] if v == NULL.
945
 *
946
 * Special speed gimmick:  when v is NULL and ihigh - ilow <= 8, it's
947
 * guaranteed the call cannot fail.
948
 */
949
static int
950
list_ass_slice_lock_held(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
951
444k
{
952
    /* Because [X]DECREF can recursively invoke list operations on
953
       this list, we must postpone all [X]DECREF activity until
954
       after the list is back in its canonical shape.  Therefore
955
       we must allocate an additional array, 'recycle', into which
956
       we temporarily copy the items that are deleted from the
957
       list. :-( */
958
444k
    PyObject *recycle_on_stack[8];
959
444k
    PyObject **recycle = recycle_on_stack; /* will allocate more if needed */
960
444k
    PyObject **item;
961
444k
    PyObject **vitem = NULL;
962
444k
    PyObject *v_as_SF = NULL; /* PySequence_Fast(v) */
963
444k
    Py_ssize_t n; /* # of elements in replacement list */
964
444k
    Py_ssize_t norig; /* # of elements in list getting replaced */
965
444k
    Py_ssize_t d; /* Change in size */
966
444k
    Py_ssize_t k;
967
444k
    size_t s;
968
444k
    int result = -1;            /* guilty until proved innocent */
969
444k
#define b ((PyListObject *)v)
970
444k
    if (v == NULL)
971
48.4k
        n = 0;
972
396k
    else {
973
396k
        v_as_SF = PySequence_Fast(v, "can only assign an iterable");
974
396k
        if(v_as_SF == NULL)
975
0
            goto Error;
976
396k
        n = PySequence_Fast_GET_SIZE(v_as_SF);
977
396k
        vitem = PySequence_Fast_ITEMS(v_as_SF);
978
396k
    }
979
444k
    if (ilow < 0)
980
0
        ilow = 0;
981
444k
    else if (ilow > Py_SIZE(a))
982
375k
        ilow = Py_SIZE(a);
983
984
444k
    if (ihigh < ilow)
985
0
        ihigh = ilow;
986
444k
    else if (ihigh > Py_SIZE(a))
987
375k
        ihigh = Py_SIZE(a);
988
989
444k
    norig = ihigh - ilow;
990
444k
    assert(norig >= 0);
991
444k
    d = n - norig;
992
444k
    if (Py_SIZE(a) + d == 0) {
993
6.66k
        Py_XDECREF(v_as_SF);
994
6.66k
        list_clear(a);
995
6.66k
        return 0;
996
6.66k
    }
997
437k
    item = a->ob_item;
998
    /* recycle the items that we are about to remove */
999
437k
    s = norig * sizeof(PyObject *);
1000
    /* If norig == 0, item might be NULL, in which case we may not memcpy from it. */
1001
437k
    if (s) {
1002
62.0k
        if (s > sizeof(recycle_on_stack)) {
1003
1.43k
            recycle = (PyObject **)PyMem_Malloc(s);
1004
1.43k
            if (recycle == NULL) {
1005
0
                PyErr_NoMemory();
1006
0
                goto Error;
1007
0
            }
1008
1.43k
        }
1009
62.0k
        memcpy(recycle, &item[ilow], s);
1010
62.0k
    }
1011
1012
437k
    if (d < 0) { /* Delete -d items */
1013
52.7k
        Py_ssize_t tail = Py_SIZE(a) - ihigh;
1014
52.7k
        ptr_wise_atomic_memmove(a, &item[ihigh+d], &item[ihigh], tail);
1015
52.7k
        (void)list_resize(a, Py_SIZE(a) + d); // NB: shrinking a list can't fail
1016
52.7k
        item = a->ob_item;
1017
52.7k
    }
1018
385k
    else if (d > 0) { /* Insert d items */
1019
384k
        k = Py_SIZE(a);
1020
384k
        if (list_resize(a, k+d) < 0)
1021
0
            goto Error;
1022
384k
        item = a->ob_item;
1023
384k
        ptr_wise_atomic_memmove(a, &item[ihigh+d], &item[ihigh], k - ihigh);
1024
384k
    }
1025
67.2M
    for (k = 0; k < n; k++, ilow++) {
1026
66.8M
        PyObject *w = vitem[k];
1027
66.8M
        FT_ATOMIC_STORE_PTR_RELEASE(item[ilow], Py_XNewRef(w));
1028
66.8M
    }
1029
575k
    for (k = norig - 1; k >= 0; --k)
1030
137k
        Py_XDECREF(recycle[k]);
1031
437k
    result = 0;
1032
437k
 Error:
1033
437k
    if (recycle != recycle_on_stack)
1034
1.43k
        PyMem_Free(recycle);
1035
437k
    Py_XDECREF(v_as_SF);
1036
437k
    return result;
1037
437k
#undef b
1038
437k
}
1039
1040
static int
1041
list_ass_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
1042
424k
{
1043
424k
    int ret;
1044
424k
    if (a == (PyListObject *)v) {
1045
0
        Py_BEGIN_CRITICAL_SECTION(a);
1046
0
        Py_ssize_t n = PyList_GET_SIZE(a);
1047
0
        PyObject *copy = list_slice_lock_held(a, 0, n);
1048
0
        if (copy == NULL) {
1049
0
            ret = -1;
1050
0
        }
1051
0
        else {
1052
0
            ret = list_ass_slice_lock_held(a, ilow, ihigh, copy);
1053
0
            Py_DECREF(copy);
1054
0
        }
1055
0
        Py_END_CRITICAL_SECTION();
1056
0
    }
1057
424k
    else if (v != NULL && PyList_CheckExact(v)) {
1058
194k
        Py_BEGIN_CRITICAL_SECTION2(a, v);
1059
194k
        ret = list_ass_slice_lock_held(a, ilow, ihigh, v);
1060
194k
        Py_END_CRITICAL_SECTION2();
1061
194k
    }
1062
229k
    else {
1063
229k
        Py_BEGIN_CRITICAL_SECTION(a);
1064
229k
        ret = list_ass_slice_lock_held(a, ilow, ihigh, v);
1065
229k
        Py_END_CRITICAL_SECTION();
1066
229k
    }
1067
424k
    return ret;
1068
424k
}
1069
1070
int
1071
PyList_SetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
1072
424k
{
1073
424k
    if (!PyList_Check(a)) {
1074
0
        PyErr_BadInternalCall();
1075
0
        return -1;
1076
0
    }
1077
424k
    return list_ass_slice((PyListObject *)a, ilow, ihigh, v);
1078
424k
}
1079
1080
static int
1081
list_inplace_repeat_lock_held(PyListObject *self, Py_ssize_t n)
1082
0
{
1083
0
    Py_ssize_t input_size = PyList_GET_SIZE(self);
1084
0
    if (input_size == 0 || n == 1) {
1085
0
        return 0;
1086
0
    }
1087
1088
0
    if (n < 1) {
1089
0
        list_clear(self);
1090
0
        return 0;
1091
0
    }
1092
1093
0
    if (input_size > PY_SSIZE_T_MAX / n) {
1094
0
        PyErr_NoMemory();
1095
0
        return -1;
1096
0
    }
1097
0
    Py_ssize_t output_size = input_size * n;
1098
1099
0
    if (list_resize(self, output_size) < 0) {
1100
0
        return -1;
1101
0
    }
1102
1103
0
    PyObject **items = self->ob_item;
1104
0
    for (Py_ssize_t j = 0; j < input_size; j++) {
1105
0
        _Py_RefcntAdd(items[j], n-1);
1106
0
    }
1107
0
#ifndef Py_GIL_DISABLED
1108
0
    _Py_memory_repeat((char *)items, sizeof(PyObject *)*output_size,
1109
0
                      sizeof(PyObject *)*input_size);
1110
#else
1111
    Py_ssize_t copied = input_size;
1112
    while (copied < output_size) {
1113
        Py_ssize_t items_to_copy = Py_MIN(copied, output_size - copied);
1114
        ptr_wise_atomic_memmove(self, items + copied, items, items_to_copy);
1115
        copied += items_to_copy;
1116
    }
1117
#endif
1118
0
    return 0;
1119
0
}
1120
1121
static PyObject *
1122
list_inplace_repeat(PyObject *_self, Py_ssize_t n)
1123
0
{
1124
0
    PyObject *ret;
1125
0
    PyListObject *self = (PyListObject *) _self;
1126
0
    Py_BEGIN_CRITICAL_SECTION(self);
1127
0
    if (list_inplace_repeat_lock_held(self, n) < 0) {
1128
0
        ret = NULL;
1129
0
    }
1130
0
    else {
1131
0
        ret = Py_NewRef(self);
1132
0
    }
1133
0
    Py_END_CRITICAL_SECTION();
1134
0
    return ret;
1135
0
}
1136
1137
static int
1138
list_ass_item_lock_held(PyListObject *a, Py_ssize_t i, PyObject *v)
1139
1.71M
{
1140
1.71M
    if (!valid_index(i, Py_SIZE(a))) {
1141
0
        PyErr_SetString(PyExc_IndexError,
1142
0
                        "list assignment index out of range");
1143
0
        return -1;
1144
0
    }
1145
1.71M
    PyObject *tmp = a->ob_item[i];
1146
1.71M
    if (v == NULL) {
1147
28.2k
        Py_ssize_t size = Py_SIZE(a);
1148
501k
        for (Py_ssize_t idx = i; idx < size - 1; idx++) {
1149
473k
            FT_ATOMIC_STORE_PTR_RELEASE(a->ob_item[idx], a->ob_item[idx + 1]);
1150
473k
        }
1151
28.2k
        Py_SET_SIZE(a, size - 1);
1152
28.2k
    }
1153
1.69M
    else {
1154
1.69M
        FT_ATOMIC_STORE_PTR_RELEASE(a->ob_item[i], Py_NewRef(v));
1155
1.69M
    }
1156
1.71M
    Py_DECREF(tmp);
1157
1.71M
    return 0;
1158
1.71M
}
1159
1160
static int
1161
list_ass_item(PyObject *aa, Py_ssize_t i, PyObject *v)
1162
16.5k
{
1163
16.5k
    int ret;
1164
16.5k
    PyListObject *a = (PyListObject *)aa;
1165
16.5k
    Py_BEGIN_CRITICAL_SECTION(a);
1166
16.5k
    ret = list_ass_item_lock_held(a, i, v);
1167
16.5k
    Py_END_CRITICAL_SECTION();
1168
16.5k
    return ret;
1169
16.5k
}
1170
1171
/*[clinic input]
1172
@critical_section
1173
list.insert
1174
1175
    index: Py_ssize_t
1176
    object: object
1177
    /
1178
1179
Insert object before index.
1180
[clinic start generated code]*/
1181
1182
static PyObject *
1183
list_insert_impl(PyListObject *self, Py_ssize_t index, PyObject *object)
1184
/*[clinic end generated code: output=7f35e32f60c8cb78 input=b1987ca998a4ae2d]*/
1185
7.79k
{
1186
7.79k
    if (ins1(self, index, object) == 0) {
1187
7.79k
        Py_RETURN_NONE;
1188
7.79k
    }
1189
0
    return NULL;
1190
7.79k
}
1191
1192
/*[clinic input]
1193
@critical_section
1194
list.clear as py_list_clear
1195
1196
Remove all items from list.
1197
[clinic start generated code]*/
1198
1199
static PyObject *
1200
py_list_clear_impl(PyListObject *self)
1201
/*[clinic end generated code: output=83726743807e3518 input=e285b7f09051a9ba]*/
1202
0
{
1203
0
    list_clear(self);
1204
0
    Py_RETURN_NONE;
1205
0
}
1206
1207
/*[clinic input]
1208
@critical_section
1209
list.copy
1210
1211
Return a shallow copy of the list.
1212
[clinic start generated code]*/
1213
1214
static PyObject *
1215
list_copy_impl(PyListObject *self)
1216
/*[clinic end generated code: output=ec6b72d6209d418e input=81c54b0c7bb4f73d]*/
1217
0
{
1218
0
    return list_slice_lock_held(self, 0, Py_SIZE(self));
1219
0
}
1220
1221
/*[clinic input]
1222
@critical_section
1223
list.append
1224
1225
     object: object
1226
     /
1227
1228
Append object to the end of the list.
1229
[clinic start generated code]*/
1230
1231
static PyObject *
1232
list_append_impl(PyListObject *self, PyObject *object)
1233
/*[clinic end generated code: output=78423561d92ed405 input=122b0853de54004f]*/
1234
22.5M
{
1235
22.5M
    if (_PyList_AppendTakeRef(self, Py_NewRef(object)) < 0) {
1236
0
        return NULL;
1237
0
    }
1238
22.5M
    Py_RETURN_NONE;
1239
22.5M
}
1240
1241
static int
1242
list_extend_fast(PyListObject *self, PyObject *iterable)
1243
250k
{
1244
250k
    Py_ssize_t n = PySequence_Fast_GET_SIZE(iterable);
1245
250k
    if (n == 0) {
1246
        /* short circuit when iterable is empty */
1247
103k
        return 0;
1248
103k
    }
1249
1250
146k
    Py_ssize_t m = Py_SIZE(self);
1251
    // It should not be possible to allocate a list large enough to cause
1252
    // an overflow on any relevant platform.
1253
146k
    assert(m < PY_SSIZE_T_MAX - n);
1254
146k
    if (self->ob_item == NULL) {
1255
76.2k
        if (list_preallocate_exact(self, n) < 0) {
1256
0
            return -1;
1257
0
        }
1258
76.2k
        Py_SET_SIZE(self, n);
1259
76.2k
    }
1260
70.7k
    else if (list_resize(self, m + n) < 0) {
1261
0
        return -1;
1262
0
    }
1263
1264
    // note that we may still have self == iterable here for the
1265
    // situation a.extend(a), but the following code works
1266
    // in that case too.  Just make sure to resize self
1267
    // before calling PySequence_Fast_ITEMS.
1268
    //
1269
    // populate the end of self with iterable's items.
1270
146k
    PyObject **src = PySequence_Fast_ITEMS(iterable);
1271
146k
    PyObject **dest = self->ob_item + m;
1272
3.10M
    for (Py_ssize_t i = 0; i < n; i++) {
1273
2.95M
        PyObject *o = src[i];
1274
2.95M
        FT_ATOMIC_STORE_PTR_RELEASE(dest[i], Py_NewRef(o));
1275
2.95M
    }
1276
146k
    return 0;
1277
146k
}
1278
1279
static int
1280
list_extend_iter_lock_held(PyListObject *self, PyObject *iterable)
1281
561k
{
1282
561k
    PyObject *it = PyObject_GetIter(iterable);
1283
561k
    if (it == NULL) {
1284
0
        return -1;
1285
0
    }
1286
561k
    PyObject *(*iternext)(PyObject *) = *Py_TYPE(it)->tp_iternext;
1287
1288
    /* Guess a result list size. */
1289
561k
    Py_ssize_t n = PyObject_LengthHint(iterable, 8);
1290
561k
    if (n < 0) {
1291
0
        Py_DECREF(it);
1292
0
        return -1;
1293
0
    }
1294
1295
561k
    Py_ssize_t m = Py_SIZE(self);
1296
561k
    if (m > PY_SSIZE_T_MAX - n) {
1297
        /* m + n overflowed; on the chance that n lied, and there really
1298
         * is enough room, ignore it.  If n was telling the truth, we'll
1299
         * eventually run out of memory during the loop.
1300
         */
1301
0
    }
1302
561k
    else if (self->ob_item == NULL) {
1303
561k
        if (n && list_preallocate_exact(self, n) < 0)
1304
0
            goto error;
1305
561k
    }
1306
158
    else {
1307
        /* Make room. */
1308
158
        if (list_resize(self, m + n) < 0) {
1309
0
            goto error;
1310
0
        }
1311
1312
        /* Make the list sane again. */
1313
158
        Py_SET_SIZE(self, m);
1314
158
    }
1315
1316
    /* Run iterator to exhaustion. */
1317
78.3M
    for (;;) {
1318
78.3M
        PyObject *item = iternext(it);
1319
78.3M
        if (item == NULL) {
1320
561k
            if (PyErr_Occurred()) {
1321
0
                if (PyErr_ExceptionMatches(PyExc_StopIteration))
1322
0
                    PyErr_Clear();
1323
0
                else
1324
0
                    goto error;
1325
0
            }
1326
561k
            break;
1327
561k
        }
1328
1329
77.7M
        if (Py_SIZE(self) < self->allocated) {
1330
77.7M
            Py_ssize_t len = Py_SIZE(self);
1331
77.7M
            FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item[len], item);  // steals item ref
1332
77.7M
            Py_SET_SIZE(self, len + 1);
1333
77.7M
        }
1334
0
        else {
1335
0
            if (_PyList_AppendTakeRef(self, item) < 0)
1336
0
                goto error;
1337
0
        }
1338
77.7M
    }
1339
1340
    /* Cut back result list if initial guess was too large. */
1341
561k
    if (Py_SIZE(self) < self->allocated) {
1342
73.9k
        if (list_resize(self, Py_SIZE(self)) < 0)
1343
0
            goto error;
1344
73.9k
    }
1345
1346
561k
    Py_DECREF(it);
1347
561k
    return 0;
1348
1349
0
  error:
1350
0
    Py_DECREF(it);
1351
0
    return -1;
1352
561k
}
1353
1354
static int
1355
list_extend_lock_held(PyListObject *self, PyObject *iterable)
1356
250k
{
1357
250k
    PyObject *seq = PySequence_Fast(iterable, "argument must be iterable");
1358
250k
    if (!seq) {
1359
0
        return -1;
1360
0
    }
1361
1362
250k
    int res = list_extend_fast(self, seq);
1363
250k
    Py_DECREF(seq);
1364
250k
    return res;
1365
250k
}
1366
1367
static int
1368
list_extend_set(PyListObject *self, PySetObject *other)
1369
28.1k
{
1370
28.1k
    Py_ssize_t m = Py_SIZE(self);
1371
28.1k
    Py_ssize_t n = PySet_GET_SIZE(other);
1372
28.1k
    Py_ssize_t r = m + n;
1373
28.1k
    if (r == 0) {
1374
4.91k
        return 0;
1375
4.91k
    }
1376
23.2k
    if (list_resize(self, r) < 0) {
1377
0
        return -1;
1378
0
    }
1379
1380
23.2k
    assert(self->ob_item != NULL);
1381
    /* populate the end of self with iterable's items */
1382
23.2k
    Py_ssize_t setpos = 0;
1383
23.2k
    Py_hash_t hash;
1384
23.2k
    PyObject *key;
1385
23.2k
    PyObject **dest = self->ob_item + m;
1386
97.8k
    while (_PySet_NextEntryRef((PyObject *)other, &setpos, &key, &hash)) {
1387
74.6k
        FT_ATOMIC_STORE_PTR_RELEASE(*dest, key);
1388
74.6k
        dest++;
1389
74.6k
    }
1390
23.2k
    Py_SET_SIZE(self, r);
1391
23.2k
    return 0;
1392
23.2k
}
1393
1394
static int
1395
list_extend_dict(PyListObject *self, PyDictObject *dict, int which_item)
1396
284k
{
1397
    // which_item: 0 for keys and 1 for values
1398
284k
    Py_ssize_t m = Py_SIZE(self);
1399
284k
    Py_ssize_t n = PyDict_GET_SIZE(dict);
1400
284k
    Py_ssize_t r = m + n;
1401
284k
    if (r == 0) {
1402
0
        return 0;
1403
0
    }
1404
284k
    if (list_resize(self, r) < 0) {
1405
0
        return -1;
1406
0
    }
1407
1408
284k
    assert(self->ob_item != NULL);
1409
284k
    PyObject **dest = self->ob_item + m;
1410
284k
    Py_ssize_t pos = 0;
1411
284k
    PyObject *keyvalue[2];
1412
1.16M
    while (_PyDict_Next((PyObject *)dict, &pos, &keyvalue[0], &keyvalue[1], NULL)) {
1413
881k
        PyObject *obj = keyvalue[which_item];
1414
881k
        Py_INCREF(obj);
1415
881k
        FT_ATOMIC_STORE_PTR_RELEASE(*dest, obj);
1416
881k
        dest++;
1417
881k
    }
1418
1419
284k
    Py_SET_SIZE(self, r);
1420
284k
    return 0;
1421
284k
}
1422
1423
static int
1424
list_extend_dictitems(PyListObject *self, PyDictObject *dict)
1425
4
{
1426
4
    Py_ssize_t m = Py_SIZE(self);
1427
4
    Py_ssize_t n = PyDict_GET_SIZE(dict);
1428
4
    Py_ssize_t r = m + n;
1429
4
    if (r == 0) {
1430
0
        return 0;
1431
0
    }
1432
4
    if (list_resize(self, r) < 0) {
1433
0
        return -1;
1434
0
    }
1435
1436
4
    assert(self->ob_item != NULL);
1437
4
    PyObject **dest = self->ob_item + m;
1438
4
    Py_ssize_t pos = 0;
1439
4
    Py_ssize_t i = 0;
1440
4
    PyObject *key, *value;
1441
180
    while (_PyDict_Next((PyObject *)dict, &pos, &key, &value, NULL)) {
1442
176
        PyObject *item = _PyTuple_FromPair(key, value);
1443
176
        if (item == NULL) {
1444
0
            Py_SET_SIZE(self, m + i);
1445
0
            return -1;
1446
0
        }
1447
176
        FT_ATOMIC_STORE_PTR_RELEASE(*dest, item);
1448
176
        dest++;
1449
176
        i++;
1450
176
    }
1451
1452
4
    Py_SET_SIZE(self, r);
1453
4
    return 0;
1454
4
}
1455
1456
static int
1457
_list_extend(PyListObject *self, PyObject *iterable)
1458
1.12M
{
1459
    // Special case:
1460
    // lists and tuples which can use PySequence_Fast ops
1461
1.12M
    int res = -1;
1462
1.12M
    if ((PyObject *)self == iterable) {
1463
0
        Py_BEGIN_CRITICAL_SECTION(self);
1464
0
        res = list_inplace_repeat_lock_held(self, 2);
1465
0
        Py_END_CRITICAL_SECTION();
1466
0
    }
1467
1.12M
    else if (PyList_CheckExact(iterable)) {
1468
248k
        Py_BEGIN_CRITICAL_SECTION2(self, iterable);
1469
248k
        res = list_extend_lock_held(self, iterable);
1470
248k
        Py_END_CRITICAL_SECTION2();
1471
248k
    }
1472
875k
    else if (PyTuple_CheckExact(iterable)) {
1473
1.45k
        Py_BEGIN_CRITICAL_SECTION(self);
1474
1.45k
        res = list_extend_lock_held(self, iterable);
1475
1.45k
        Py_END_CRITICAL_SECTION();
1476
1.45k
    }
1477
874k
    else if (PyAnySet_CheckExact(iterable)) {
1478
28.1k
        Py_BEGIN_CRITICAL_SECTION2(self, iterable);
1479
28.1k
        res = list_extend_set(self, (PySetObject *)iterable);
1480
28.1k
        Py_END_CRITICAL_SECTION2();
1481
28.1k
    }
1482
846k
    else if (PyDict_CheckExact(iterable)) {
1483
284k
        Py_BEGIN_CRITICAL_SECTION2(self, iterable);
1484
284k
        res = list_extend_dict(self, (PyDictObject *)iterable, 0 /*keys*/);
1485
284k
        Py_END_CRITICAL_SECTION2();
1486
284k
    }
1487
561k
    else if (Py_IS_TYPE(iterable, &PyDictKeys_Type)) {
1488
0
        PyDictObject *dict = ((_PyDictViewObject *)iterable)->dv_dict;
1489
0
        Py_BEGIN_CRITICAL_SECTION2(self, dict);
1490
0
        res = list_extend_dict(self, dict, 0 /*keys*/);
1491
0
        Py_END_CRITICAL_SECTION2();
1492
0
    }
1493
561k
    else if (Py_IS_TYPE(iterable, &PyDictValues_Type)) {
1494
3
        PyDictObject *dict = ((_PyDictViewObject *)iterable)->dv_dict;
1495
3
        Py_BEGIN_CRITICAL_SECTION2(self, dict);
1496
3
        res = list_extend_dict(self, dict, 1 /*values*/);
1497
3
        Py_END_CRITICAL_SECTION2();
1498
3
    }
1499
561k
    else if (Py_IS_TYPE(iterable, &PyDictItems_Type)) {
1500
4
        PyDictObject *dict = ((_PyDictViewObject *)iterable)->dv_dict;
1501
4
        Py_BEGIN_CRITICAL_SECTION2(self, dict);
1502
4
        res = list_extend_dictitems(self, dict);
1503
4
        Py_END_CRITICAL_SECTION2();
1504
4
    }
1505
561k
    else {
1506
561k
        Py_BEGIN_CRITICAL_SECTION(self);
1507
561k
        res = list_extend_iter_lock_held(self, iterable);
1508
561k
        Py_END_CRITICAL_SECTION();
1509
561k
    }
1510
1.12M
    return res;
1511
1.12M
}
1512
1513
/*[clinic input]
1514
list.extend as list_extend
1515
1516
     iterable: object
1517
     /
1518
1519
Extend list by appending elements from the iterable.
1520
[clinic start generated code]*/
1521
1522
static PyObject *
1523
list_extend_impl(PyListObject *self, PyObject *iterable)
1524
/*[clinic end generated code: output=b0eba9e0b186d5ce input=979da7597a515791]*/
1525
695k
{
1526
695k
    if (_list_extend(self, iterable) < 0) {
1527
0
        return NULL;
1528
0
    }
1529
695k
    Py_RETURN_NONE;
1530
695k
}
1531
1532
PyObject *
1533
_PyList_Extend(PyListObject *self, PyObject *iterable)
1534
590k
{
1535
590k
    return list_extend((PyObject*)self, iterable);
1536
590k
}
1537
1538
int
1539
PyList_Extend(PyObject *self, PyObject *iterable)
1540
0
{
1541
0
    if (!PyList_Check(self)) {
1542
0
        PyErr_BadInternalCall();
1543
0
        return -1;
1544
0
    }
1545
0
    return _list_extend((PyListObject*)self, iterable);
1546
0
}
1547
1548
1549
int
1550
PyList_Clear(PyObject *self)
1551
0
{
1552
0
    if (!PyList_Check(self)) {
1553
0
        PyErr_BadInternalCall();
1554
0
        return -1;
1555
0
    }
1556
0
    Py_BEGIN_CRITICAL_SECTION(self);
1557
0
    list_clear((PyListObject*)self);
1558
0
    Py_END_CRITICAL_SECTION();
1559
0
    return 0;
1560
0
}
1561
1562
1563
static PyObject *
1564
list_inplace_concat(PyObject *_self, PyObject *other)
1565
143k
{
1566
143k
    PyListObject *self = (PyListObject *)_self;
1567
143k
    if (_list_extend(self, other) < 0) {
1568
0
        return NULL;
1569
0
    }
1570
143k
    return Py_NewRef(self);
1571
143k
}
1572
1573
/*[clinic input]
1574
@critical_section
1575
list.pop
1576
1577
    index: Py_ssize_t = -1
1578
    /
1579
1580
Remove and return item at index (default last).
1581
1582
Raises IndexError if list is empty or index is out of range.
1583
[clinic start generated code]*/
1584
1585
static PyObject *
1586
list_pop_impl(PyListObject *self, Py_ssize_t index)
1587
/*[clinic end generated code: output=6bd69dcb3f17eca8 input=c269141068ae4b8f]*/
1588
1.64k
{
1589
1.64k
    PyObject *v;
1590
1591
1.64k
    if (Py_SIZE(self) == 0) {
1592
        /* Special-case most common failure cause */
1593
0
        PyErr_SetString(PyExc_IndexError, "pop from empty list");
1594
0
        return NULL;
1595
0
    }
1596
1.64k
    if (index < 0)
1597
1.64k
        index += Py_SIZE(self);
1598
1.64k
    if (!valid_index(index, Py_SIZE(self))) {
1599
0
        PyErr_SetString(PyExc_IndexError, "pop index out of range");
1600
0
        return NULL;
1601
0
    }
1602
1603
1.64k
    PyObject **items = self->ob_item;
1604
1.64k
    v = items[index];
1605
1.64k
    if (Py_SIZE(self) == 1) {
1606
1.45k
        Py_INCREF(v);
1607
1.45k
        list_clear(self);
1608
1.45k
        return v;
1609
1.45k
    }
1610
193
    Py_ssize_t size_after_pop = Py_SIZE(self) - 1;
1611
193
    if (index < size_after_pop) {
1612
0
        ptr_wise_atomic_memmove(self, &items[index], &items[index+1],
1613
0
                                size_after_pop - index);
1614
0
    }
1615
193
    list_resize(self, size_after_pop);  // NB: shrinking a list can't fail
1616
193
    return v;
1617
1.64k
}
1618
1619
/* Reverse a slice of a list in place, from lo up to (exclusive) hi. */
1620
static void
1621
reverse_slice(PyObject **lo, PyObject **hi)
1622
24.6k
{
1623
24.6k
    assert(lo && hi);
1624
1625
24.6k
    --hi;
1626
49.7k
    while (lo < hi) {
1627
25.1k
        PyObject *t = *lo;
1628
25.1k
        FT_ATOMIC_STORE_PTR_RELEASE(*lo, *hi);
1629
25.1k
        FT_ATOMIC_STORE_PTR_RELEASE(*hi, t);
1630
25.1k
        ++lo;
1631
25.1k
        --hi;
1632
25.1k
    }
1633
24.6k
}
1634
1635
/* Lots of code for an adaptive, stable, natural mergesort.  There are many
1636
 * pieces to this algorithm; read listsort.txt for overviews and details.
1637
 */
1638
1639
/* A sortslice contains a pointer to an array of keys and a pointer to
1640
 * an array of corresponding values.  In other words, keys[i]
1641
 * corresponds with values[i].  If values == NULL, then the keys are
1642
 * also the values.
1643
 *
1644
 * Several convenience routines are provided here, so that keys and
1645
 * values are always moved in sync.
1646
 */
1647
1648
typedef struct {
1649
    PyObject **keys;
1650
    PyObject **values;
1651
} sortslice;
1652
1653
Py_LOCAL_INLINE(void)
1654
sortslice_copy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j)
1655
284
{
1656
284
    s1->keys[i] = s2->keys[j];
1657
284
    if (s1->values != NULL)
1658
0
        s1->values[i] = s2->values[j];
1659
284
}
1660
1661
Py_LOCAL_INLINE(void)
1662
sortslice_copy_incr(sortslice *dst, sortslice *src)
1663
28.9k
{
1664
28.9k
    *dst->keys++ = *src->keys++;
1665
28.9k
    if (dst->values != NULL)
1666
0
        *dst->values++ = *src->values++;
1667
28.9k
}
1668
1669
Py_LOCAL_INLINE(void)
1670
sortslice_copy_decr(sortslice *dst, sortslice *src)
1671
19.0k
{
1672
19.0k
    *dst->keys-- = *src->keys--;
1673
19.0k
    if (dst->values != NULL)
1674
0
        *dst->values-- = *src->values--;
1675
19.0k
}
1676
1677
1678
Py_LOCAL_INLINE(void)
1679
sortslice_memcpy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
1680
                 Py_ssize_t n)
1681
2.04k
{
1682
2.04k
    memcpy(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
1683
2.04k
    if (s1->values != NULL)
1684
0
        memcpy(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
1685
2.04k
}
1686
1687
Py_LOCAL_INLINE(void)
1688
sortslice_memmove(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
1689
                  Py_ssize_t n)
1690
1.32k
{
1691
1.32k
    memmove(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
1692
1.32k
    if (s1->values != NULL)
1693
0
        memmove(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
1694
1.32k
}
1695
1696
Py_LOCAL_INLINE(void)
1697
sortslice_advance(sortslice *slice, Py_ssize_t n)
1698
55.4k
{
1699
55.4k
    slice->keys += n;
1700
55.4k
    if (slice->values != NULL)
1701
10
        slice->values += n;
1702
55.4k
}
1703
1704
/* Comparison function: ms->key_compare, which is set at run-time in
1705
 * listsort_impl to optimize for various special cases.
1706
 * Returns -1 on error, 1 if x < y, 0 if x >= y.
1707
 */
1708
1709
577k
#define ISLT(X, Y) (*(ms->key_compare))(X, Y, ms)
1710
1711
/* Compare X to Y via "<".  Goto "fail" if the comparison raises an
1712
   error.  Else "k" is set to true iff X<Y, and an "if (k)" block is
1713
   started.  It makes more sense in context <wink>.  X and Y are PyObject*s.
1714
*/
1715
533k
#define IFLT(X, Y) if ((k = ISLT(X, Y)) < 0) goto fail;  \
1716
533k
           if (k)
1717
1718
/* The maximum number of entries in a MergeState's pending-runs stack.
1719
 * For a list with n elements, this needs at most floor(log2(n)) + 1 entries
1720
 * even if we didn't force runs to a minimal length.  So the number of bits
1721
 * in a Py_ssize_t is plenty large enough for all cases.
1722
 */
1723
#define MAX_MERGE_PENDING (SIZEOF_SIZE_T * 8)
1724
1725
/* When we get into galloping mode, we stay there until both runs win less
1726
 * often than MIN_GALLOP consecutive times.  See listsort.txt for more info.
1727
 */
1728
75.9k
#define MIN_GALLOP 7
1729
1730
/* Avoid malloc for small temp arrays. */
1731
71.4k
#define MERGESTATE_TEMP_SIZE 256
1732
1733
/* The largest value of minrun. This must be a power of 2, and >= 1 */
1734
71.9k
#define MAX_MINRUN 64
1735
#if ((MAX_MINRUN) < 1) || ((MAX_MINRUN) & ((MAX_MINRUN) - 1))
1736
#error "MAX_MINRUN must be a power of 2, and >= 1"
1737
#endif
1738
1739
/* One MergeState exists on the stack per invocation of mergesort.  It's just
1740
 * a convenient way to pass state around among the helper functions.
1741
 */
1742
struct s_slice {
1743
    sortslice base;
1744
    Py_ssize_t len;   /* length of run */
1745
    int power; /* node "level" for powersort merge strategy */
1746
};
1747
1748
typedef struct s_MergeState MergeState;
1749
struct s_MergeState {
1750
    /* This controls when we get *into* galloping mode.  It's initialized
1751
     * to MIN_GALLOP.  merge_lo and merge_hi tend to nudge it higher for
1752
     * random data, and lower for highly structured data.
1753
     */
1754
    Py_ssize_t min_gallop;
1755
1756
    Py_ssize_t listlen;     /* len(input_list) - read only */
1757
    PyObject **basekeys;    /* base address of keys array - read only */
1758
1759
    /* 'a' is temp storage to help with merges.  It contains room for
1760
     * alloced entries.
1761
     */
1762
    sortslice a;        /* may point to temparray below */
1763
    Py_ssize_t alloced;
1764
1765
    /* A stack of n pending runs yet to be merged.  Run #i starts at
1766
     * address base[i] and extends for len[i] elements.  It's always
1767
     * true (so long as the indices are in bounds) that
1768
     *
1769
     *     pending[i].base + pending[i].len == pending[i+1].base
1770
     *
1771
     * so we could cut the storage for this, but it's a minor amount,
1772
     * and keeping all the info explicit simplifies the code.
1773
     */
1774
    int n;
1775
    struct s_slice pending[MAX_MERGE_PENDING];
1776
1777
    /* 'a' points to this when possible, rather than muck with malloc. */
1778
    PyObject *temparray[MERGESTATE_TEMP_SIZE];
1779
1780
    /* This is the function we will use to compare two keys,
1781
     * even when none of our special cases apply and we have to use
1782
     * safe_object_compare. */
1783
    int (*key_compare)(PyObject *, PyObject *, MergeState *);
1784
1785
    /* This function is used by unsafe_object_compare to optimize comparisons
1786
     * when we know our list is type-homogeneous but we can't assume anything else.
1787
     * In the pre-sort check it is set equal to Py_TYPE(key)->tp_richcompare */
1788
    PyObject *(*key_richcompare)(PyObject *, PyObject *, int);
1789
1790
    /* This function is used by unsafe_tuple_compare to compare the first elements
1791
     * of tuples. It may be set to safe_object_compare, but the idea is that hopefully
1792
     * we can assume more, and use one of the special-case compares. */
1793
    int (*tuple_elem_compare)(PyObject *, PyObject *, MergeState *);
1794
1795
    /* Varisbles used for minrun computation. The "ideal" minrun length is
1796
     * the infinite precision listlen / 2**e. See listsort.txt.
1797
     */
1798
     Py_ssize_t mr_current, mr_e, mr_mask;
1799
};
1800
1801
/* binarysort is the best method for sorting small arrays: it does few
1802
   compares, but can do data movement quadratic in the number of elements.
1803
   ss->keys is viewed as an array of n kays, a[:n]. a[:ok] is already sorted.
1804
   Pass ok = 0 (or 1) if you don't know.
1805
   It's sorted in-place, by a stable binary insertion sort. If ss->values
1806
   isn't NULL, it's permuted in lockstap with ss->keys.
1807
   On entry, must have n >= 1, and 0 <= ok <= n <= MAX_MINRUN.
1808
   Return -1 if comparison raises an exception, else 0.
1809
   Even in case of error, the output slice will be some permutation of
1810
   the input (nothing is lost or duplicated).
1811
*/
1812
static int
1813
binarysort(MergeState *ms, const sortslice *ss, Py_ssize_t n, Py_ssize_t ok)
1814
19.9k
{
1815
19.9k
    Py_ssize_t k; /* for IFLT macro expansion */
1816
19.9k
    PyObject ** const a = ss->keys;
1817
19.9k
    PyObject ** const v = ss->values;
1818
19.9k
    const bool has_values = v != NULL;
1819
19.9k
    PyObject *pivot;
1820
19.9k
    Py_ssize_t M;
1821
1822
19.9k
    assert(0 <= ok && ok <= n && 1 <= n && n <= MAX_MINRUN);
1823
    /* assert a[:ok] is sorted */
1824
19.9k
    if (! ok)
1825
0
        ++ok;
1826
    /* Regular insertion sort has average- and worst-case O(n**2) cost
1827
       for both # of comparisons and number of bytes moved. But its branches
1828
       are highly predictable, and it loves sorted input (n-1 compares and no
1829
       data movement). This is significant in cases like sortperf.py's %sort,
1830
       where an out-of-order element near the start of a run is moved into
1831
       place slowly but then the remaining elements up to length minrun are
1832
       generally at worst one slot away from their correct position (so only
1833
       need 1 or 2 commpares to resolve). If comparisons are very fast (such
1834
       as for a list of Python floats), the simple inner loop leaves it
1835
       very competitive with binary insertion, despite that it does
1836
       significantly more compares overall on random data.
1837
1838
       Binary insertion sort has worst, average, and best case O(n log n)
1839
       cost for # of comparisons, but worst and average case O(n**2) cost
1840
       for data movement. The more expensive comparisons, the more important
1841
       the comparison advantage. But its branches are less predictable the
1842
       more "randomish" the data, and that's so significant its worst case
1843
       in real life is random input rather than reverse-ordered (which does
1844
       about twice the data movement than random input does).
1845
1846
       Note that the number of bytes moved doesn't seem to matter. MAX_MINRUN
1847
       of 64 is so small that the key and value pointers all fit in a corner
1848
       of L1 cache, and moving things around in that is very fast. */
1849
#if 0 // ordinary insertion sort.
1850
    PyObject * vpivot = NULL;
1851
    for (; ok < n; ++ok) {
1852
        pivot = a[ok];
1853
        if (has_values)
1854
            vpivot = v[ok];
1855
        for (M = ok - 1; M >= 0; --M) {
1856
            k = ISLT(pivot, a[M]);
1857
            if (k < 0) {
1858
                a[M + 1] = pivot;
1859
                if (has_values)
1860
                    v[M + 1] = vpivot;
1861
                goto fail;
1862
            }
1863
            else if (k) {
1864
                a[M + 1] = a[M];
1865
                if (has_values)
1866
                    v[M + 1] = v[M];
1867
            }
1868
            else
1869
                break;
1870
        }
1871
        a[M + 1] = pivot;
1872
        if (has_values)
1873
            v[M + 1] = vpivot;
1874
    }
1875
#else // binary insertion sort
1876
19.9k
    Py_ssize_t L, R;
1877
133k
    for (; ok < n; ++ok) {
1878
        /* set L to where a[ok] belongs */
1879
113k
        L = 0;
1880
113k
        R = ok;
1881
113k
        pivot = a[ok];
1882
        /* Slice invariants. vacuously true at the start:
1883
         * all a[0:L]  <= pivot
1884
         * all a[L:R]     unknown
1885
         * all a[R:ok]  > pivot
1886
         */
1887
113k
        assert(L < R);
1888
386k
        do {
1889
            /* don't do silly ;-) things to prevent overflow when finding
1890
               the midpoint; L and R are very far from filling a Py_ssize_t */
1891
386k
            M = (L + R) >> 1;
1892
386k
#if 1 // straightforward, but highly unpredictable branch on random data
1893
386k
            IFLT(pivot, a[M])
1894
203k
                R = M;
1895
182k
            else
1896
182k
                L = M + 1;
1897
#else
1898
            /* Try to get compiler to generate conditional move instructions
1899
               instead. Works fine, but leaving it disabled for now because
1900
               it's not yielding consistently faster sorts. Needs more
1901
               investigation. More computation in the inner loop adds its own
1902
               costs, which can be significant when compares are fast. */
1903
            k = ISLT(pivot, a[M]);
1904
            if (k < 0)
1905
                goto fail;
1906
            Py_ssize_t Mp1 = M + 1;
1907
            R = k ? M : R;
1908
            L = k ? L : Mp1;
1909
#endif
1910
386k
        } while (L < R);
1911
113k
        assert(L == R);
1912
        /* a[:L] holds all elements from a[:ok] <= pivot now, so pivot belongs
1913
           at index L. Slide a[L:ok] to the right a slot to make room for it.
1914
           Caution: using memmove is much slower under MSVC 5; we're not
1915
           usually moving many slots. Years later: under Visual Studio 2022,
1916
           memmove seems just slightly slower than doing it "by hand". */
1917
842k
        for (M = ok; M > L; --M)
1918
728k
            a[M] = a[M - 1];
1919
113k
        a[L] = pivot;
1920
113k
        if (has_values) {
1921
0
            pivot = v[ok];
1922
0
            for (M = ok; M > L; --M)
1923
0
                v[M] = v[M - 1];
1924
0
            v[L] = pivot;
1925
0
        }
1926
113k
    }
1927
19.9k
#endif // pick binary or regular insertion sort
1928
19.9k
    return 0;
1929
1930
0
 fail:
1931
0
    return -1;
1932
19.9k
}
1933
1934
static void
1935
sortslice_reverse(sortslice *s, Py_ssize_t n)
1936
24.5k
{
1937
24.5k
    reverse_slice(s->keys, &s->keys[n]);
1938
24.5k
    if (s->values != NULL)
1939
0
        reverse_slice(s->values, &s->values[n]);
1940
24.5k
}
1941
1942
/*
1943
Return the length of the run beginning at slo->keys, spanning no more than
1944
nremaining elements. The run beginning there may be ascending or descending,
1945
but the function permutes it in place, if needed, so that it's always ascending
1946
upon return.
1947
1948
Returns -1 in case of error.
1949
*/
1950
static Py_ssize_t
1951
count_run(MergeState *ms, sortslice *slo, Py_ssize_t nremaining)
1952
50.0k
{
1953
50.0k
    Py_ssize_t k; /* used by IFLT macro expansion */
1954
50.0k
    Py_ssize_t n;
1955
50.0k
    PyObject ** const lo = slo->keys;
1956
1957
    /* In general, as things go on we've established that the slice starts
1958
       with a monotone run of n elements, starting at lo. */
1959
1960
    /* We're n elements into the slice, and the most recent neq+1 elements are
1961
     * all equal. This reverses them in-place, and resets neq for reuse.
1962
     */
1963
50.0k
#define REVERSE_LAST_NEQ                        \
1964
50.0k
    if (neq) {                                  \
1965
0
        sortslice slice = *slo;                 \
1966
0
        ++neq;                                  \
1967
0
        sortslice_advance(&slice, n - neq);     \
1968
0
        sortslice_reverse(&slice, neq);         \
1969
0
        neq = 0;                                \
1970
0
    }
1971
1972
    /* Sticking to only __lt__ compares is confusing and error-prone. But in
1973
     * this routine, almost all uses of IFLT can be captured by tiny macros
1974
     * giving mnemonic names to the intent. Note that inline functions don't
1975
     * work for this (IFLT expands to code including `goto fail`).
1976
     */
1977
50.0k
#define IF_NEXT_LARGER  IFLT(lo[n-1], lo[n])
1978
110k
#define IF_NEXT_SMALLER IFLT(lo[n], lo[n-1])
1979
1980
50.0k
    assert(nremaining);
1981
    /* try ascending run first */
1982
92.1k
    for (n = 1; n < nremaining; ++n) {
1983
73.5k
        IF_NEXT_SMALLER
1984
31.4k
            break;
1985
73.5k
    }
1986
50.0k
    if (n == nremaining)
1987
18.5k
        return n;
1988
    /* lo[n] is strictly less */
1989
    /* If n is 1 now, then the first compare established it's a descending
1990
     * run, so fall through to the descending case. But if n > 1, there are
1991
     * n elements in an ascending run terminated by the strictly less lo[n].
1992
     * If the first key < lo[n-1], *somewhere* along the way the sequence
1993
     * increased, so we're done (there is no descending run).
1994
     * Else first key >= lo[n-1], which implies that the entire ascending run
1995
     * consists of equal elements. In that case, this is a descending run,
1996
     * and we reverse the all-equal prefix in-place.
1997
     */
1998
31.4k
    if (n > 1) {
1999
6.88k
        IFLT(lo[0], lo[n-1])
2000
6.88k
            return n;
2001
0
        sortslice_reverse(slo, n);
2002
0
    }
2003
24.5k
    ++n; /* in all cases it's been established that lo[n] has been resolved */
2004
2005
    /* Finish descending run. All-squal subruns are reversed in-place on the
2006
     * fly. Their original order will be restored at the end by the whole-slice
2007
     * reversal.
2008
     */
2009
24.5k
    Py_ssize_t neq = 0;
2010
28.5k
    for ( ; n < nremaining; ++n) {
2011
19.6k
        IF_NEXT_SMALLER {
2012
            /* This ends the most recent run of equal elements, but still in
2013
             * the "descending" direction.
2014
             */
2015
3.96k
            REVERSE_LAST_NEQ
2016
3.96k
        }
2017
15.7k
        else {
2018
15.7k
            IF_NEXT_LARGER /* descending run is over */
2019
15.7k
                break;
2020
0
            else /* not x < y and not y < x implies x == y */
2021
0
                ++neq;
2022
15.7k
        }
2023
19.6k
    }
2024
24.5k
    REVERSE_LAST_NEQ
2025
24.5k
    sortslice_reverse(slo, n); /* transform to ascending run */
2026
2027
    /* And after reversing, it's possible this can be extended by a
2028
     * naturally increasing suffix; e.g., [3, 2, 3, 4, 1] makes an
2029
     * ascending run from the first 4 elements.
2030
     */
2031
28.7k
    for ( ; n < nremaining; ++n) {
2032
17.2k
        IF_NEXT_SMALLER
2033
13.0k
            break;
2034
17.2k
    }
2035
2036
24.5k
    return n;
2037
0
fail:
2038
0
    return -1;
2039
2040
24.5k
#undef REVERSE_LAST_NEQ
2041
24.5k
#undef IF_NEXT_SMALLER
2042
24.5k
#undef IF_NEXT_LARGER
2043
24.5k
}
2044
2045
/*
2046
Locate the proper position of key in a sorted vector; if the vector contains
2047
an element equal to key, return the position immediately to the left of
2048
the leftmost equal element.  [gallop_right() does the same except returns
2049
the position to the right of the rightmost equal element (if any).]
2050
2051
"a" is a sorted vector with n elements, starting at a[0].  n must be > 0.
2052
2053
"hint" is an index at which to begin the search, 0 <= hint < n.  The closer
2054
hint is to the final result, the faster this runs.
2055
2056
The return value is the int k in 0..n such that
2057
2058
    a[k-1] < key <= a[k]
2059
2060
pretending that *(a-1) is minus infinity and a[n] is plus infinity.  IOW,
2061
key belongs at index k; or, IOW, the first k elements of a should precede
2062
key, and the last n-k should follow key.
2063
2064
Returns -1 on error.  See listsort.txt for info on the method.
2065
*/
2066
static Py_ssize_t
2067
gallop_left(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
2068
2.29k
{
2069
2.29k
    Py_ssize_t ofs;
2070
2.29k
    Py_ssize_t lastofs;
2071
2.29k
    Py_ssize_t k;
2072
2073
2.29k
    assert(key && a && n > 0 && hint >= 0 && hint < n);
2074
2075
2.29k
    a += hint;
2076
2.29k
    lastofs = 0;
2077
2.29k
    ofs = 1;
2078
2.29k
    IFLT(*a, key) {
2079
        /* a[hint] < key -- gallop right, until
2080
         * a[hint + lastofs] < key <= a[hint + ofs]
2081
         */
2082
1.20k
        const Py_ssize_t maxofs = n - hint;             /* &a[n-1] is highest */
2083
2.04k
        while (ofs < maxofs) {
2084
1.40k
            IFLT(a[ofs], key) {
2085
841
                lastofs = ofs;
2086
841
                assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
2087
841
                ofs = (ofs << 1) + 1;
2088
841
            }
2089
567
            else                /* key <= a[hint + ofs] */
2090
567
                break;
2091
1.40k
        }
2092
1.20k
        if (ofs > maxofs)
2093
36
            ofs = maxofs;
2094
        /* Translate back to offsets relative to &a[0]. */
2095
1.20k
        lastofs += hint;
2096
1.20k
        ofs += hint;
2097
1.20k
    }
2098
1.09k
    else {
2099
        /* key <= a[hint] -- gallop left, until
2100
         * a[hint - ofs] < key <= a[hint - lastofs]
2101
         */
2102
1.09k
        const Py_ssize_t maxofs = hint + 1;             /* &a[0] is lowest */
2103
2.28k
        while (ofs < maxofs) {
2104
1.89k
            IFLT(*(a-ofs), key)
2105
706
                break;
2106
            /* key <= a[hint - ofs] */
2107
1.18k
            lastofs = ofs;
2108
1.18k
            assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
2109
1.18k
            ofs = (ofs << 1) + 1;
2110
1.18k
        }
2111
1.09k
        if (ofs > maxofs)
2112
30
            ofs = maxofs;
2113
        /* Translate back to positive offsets relative to &a[0]. */
2114
1.09k
        k = lastofs;
2115
1.09k
        lastofs = hint - ofs;
2116
1.09k
        ofs = hint - k;
2117
1.09k
    }
2118
2.29k
    a -= hint;
2119
2120
2.29k
    assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
2121
    /* Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
2122
     * right of lastofs but no farther right than ofs.  Do a binary
2123
     * search, with invariant a[lastofs-1] < key <= a[ofs].
2124
     */
2125
2.29k
    ++lastofs;
2126
4.24k
    while (lastofs < ofs) {
2127
1.94k
        Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
2128
2129
1.94k
        IFLT(a[m], key)
2130
881
            lastofs = m+1;              /* a[m] < key */
2131
1.06k
        else
2132
1.06k
            ofs = m;                    /* key <= a[m] */
2133
1.94k
    }
2134
2.29k
    assert(lastofs == ofs);             /* so a[ofs-1] < key <= a[ofs] */
2135
2.29k
    return ofs;
2136
2137
0
fail:
2138
0
    return -1;
2139
2.29k
}
2140
2141
/*
2142
Exactly like gallop_left(), except that if key already exists in a[0:n],
2143
finds the position immediately to the right of the rightmost equal value.
2144
2145
The return value is the int k in 0..n such that
2146
2147
    a[k-1] <= key < a[k]
2148
2149
or -1 if error.
2150
2151
The code duplication is massive, but this is enough different given that
2152
we're sticking to "<" comparisons that it's much harder to follow if
2153
written as one routine with yet another "left or right?" flag.
2154
*/
2155
static Py_ssize_t
2156
gallop_right(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
2157
2.38k
{
2158
2.38k
    Py_ssize_t ofs;
2159
2.38k
    Py_ssize_t lastofs;
2160
2.38k
    Py_ssize_t k;
2161
2162
2.38k
    assert(key && a && n > 0 && hint >= 0 && hint < n);
2163
2164
2.38k
    a += hint;
2165
2.38k
    lastofs = 0;
2166
2.38k
    ofs = 1;
2167
2.38k
    IFLT(key, *a) {
2168
        /* key < a[hint] -- gallop left, until
2169
         * a[hint - ofs] <= key < a[hint - lastofs]
2170
         */
2171
1.38k
        const Py_ssize_t maxofs = hint + 1;             /* &a[0] is lowest */
2172
2.10k
        while (ofs < maxofs) {
2173
1.07k
            IFLT(key, *(a-ofs)) {
2174
715
                lastofs = ofs;
2175
715
                assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
2176
715
                ofs = (ofs << 1) + 1;
2177
715
            }
2178
360
            else                /* a[hint - ofs] <= key */
2179
360
                break;
2180
1.07k
        }
2181
1.38k
        if (ofs > maxofs)
2182
60
            ofs = maxofs;
2183
        /* Translate back to positive offsets relative to &a[0]. */
2184
1.38k
        k = lastofs;
2185
1.38k
        lastofs = hint - ofs;
2186
1.38k
        ofs = hint - k;
2187
1.38k
    }
2188
995
    else {
2189
        /* a[hint] <= key -- gallop right, until
2190
         * a[hint + lastofs] <= key < a[hint + ofs]
2191
        */
2192
995
        const Py_ssize_t maxofs = n - hint;             /* &a[n-1] is highest */
2193
2.00k
        while (ofs < maxofs) {
2194
1.72k
            IFLT(key, a[ofs])
2195
719
                break;
2196
            /* a[hint + ofs] <= key */
2197
1.01k
            lastofs = ofs;
2198
1.01k
            assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
2199
1.01k
            ofs = (ofs << 1) + 1;
2200
1.01k
        }
2201
995
        if (ofs > maxofs)
2202
0
            ofs = maxofs;
2203
        /* Translate back to offsets relative to &a[0]. */
2204
995
        lastofs += hint;
2205
995
        ofs += hint;
2206
995
    }
2207
2.38k
    a -= hint;
2208
2209
2.38k
    assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
2210
    /* Now a[lastofs] <= key < a[ofs], so key belongs somewhere to the
2211
     * right of lastofs but no farther right than ofs.  Do a binary
2212
     * search, with invariant a[lastofs-1] <= key < a[ofs].
2213
     */
2214
2.38k
    ++lastofs;
2215
4.05k
    while (lastofs < ofs) {
2216
1.67k
        Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
2217
2218
1.67k
        IFLT(key, a[m])
2219
961
            ofs = m;                    /* key < a[m] */
2220
716
        else
2221
716
            lastofs = m+1;              /* a[m] <= key */
2222
1.67k
    }
2223
2.38k
    assert(lastofs == ofs);             /* so a[ofs-1] <= key < a[ofs] */
2224
2.38k
    return ofs;
2225
2226
0
fail:
2227
0
    return -1;
2228
2.38k
}
2229
2230
/* Conceptually a MergeState's constructor. */
2231
static void
2232
merge_init(MergeState *ms, Py_ssize_t list_size, int has_keyfunc,
2233
           sortslice *lo)
2234
71.3k
{
2235
71.3k
    assert(ms != NULL);
2236
71.3k
    if (has_keyfunc) {
2237
        /* The temporary space for merging will need at most half the list
2238
         * size rounded up.  Use the minimum possible space so we can use the
2239
         * rest of temparray for other things.  In particular, if there is
2240
         * enough extra space, listsort() will use it to store the keys.
2241
         */
2242
10
        ms->alloced = (list_size + 1) / 2;
2243
2244
        /* ms->alloced describes how many keys will be stored at
2245
           ms->temparray, but we also need to store the values.  Hence,
2246
           ms->alloced is capped at half of MERGESTATE_TEMP_SIZE. */
2247
10
        if (MERGESTATE_TEMP_SIZE / 2 < ms->alloced)
2248
0
            ms->alloced = MERGESTATE_TEMP_SIZE / 2;
2249
10
        ms->a.values = &ms->temparray[ms->alloced];
2250
10
    }
2251
71.3k
    else {
2252
71.3k
        ms->alloced = MERGESTATE_TEMP_SIZE;
2253
71.3k
        ms->a.values = NULL;
2254
71.3k
    }
2255
71.3k
    ms->a.keys = ms->temparray;
2256
71.3k
    ms->n = 0;
2257
71.3k
    ms->min_gallop = MIN_GALLOP;
2258
71.3k
    ms->listlen = list_size;
2259
71.3k
    ms->basekeys = lo->keys;
2260
2261
    /* State for generating minrun values. See listsort.txt. */
2262
71.3k
    ms->mr_e = 0;
2263
71.9k
    while (list_size >> ms->mr_e >= MAX_MINRUN) {
2264
527
        ++ms->mr_e;
2265
527
    }
2266
71.3k
    ms->mr_mask = (1 << ms->mr_e) - 1;
2267
71.3k
    ms->mr_current = 0;
2268
71.3k
}
2269
2270
/* Free all the temp memory owned by the MergeState.  This must be called
2271
 * when you're done with a MergeState, and may be called before then if
2272
 * you want to free the temp memory early.
2273
 */
2274
static void
2275
merge_freemem(MergeState *ms)
2276
71.3k
{
2277
71.3k
    assert(ms != NULL);
2278
71.3k
    if (ms->a.keys != ms->temparray) {
2279
0
        PyMem_Free(ms->a.keys);
2280
0
        ms->a.keys = NULL;
2281
0
    }
2282
71.3k
}
2283
2284
/* Ensure enough temp memory for 'need' array slots is available.
2285
 * Returns 0 on success and -1 if the memory can't be gotten.
2286
 */
2287
static int
2288
merge_getmem(MergeState *ms, Py_ssize_t need)
2289
0
{
2290
0
    int multiplier;
2291
2292
0
    assert(ms != NULL);
2293
0
    if (need <= ms->alloced)
2294
0
        return 0;
2295
2296
0
    multiplier = ms->a.values != NULL ? 2 : 1;
2297
2298
    /* Don't realloc!  That can cost cycles to copy the old data, but
2299
     * we don't care what's in the block.
2300
     */
2301
0
    merge_freemem(ms);
2302
0
    if ((size_t)need > PY_SSIZE_T_MAX / sizeof(PyObject *) / multiplier) {
2303
0
        PyErr_NoMemory();
2304
0
        return -1;
2305
0
    }
2306
0
    ms->a.keys = (PyObject **)PyMem_Malloc(multiplier * need
2307
0
                                          * sizeof(PyObject *));
2308
0
    if (ms->a.keys != NULL) {
2309
0
        ms->alloced = need;
2310
0
        if (ms->a.values != NULL)
2311
0
            ms->a.values = &ms->a.keys[need];
2312
0
        return 0;
2313
0
    }
2314
0
    PyErr_NoMemory();
2315
0
    return -1;
2316
0
}
2317
658
#define MERGE_GETMEM(MS, NEED) ((NEED) <= (MS)->alloced ? 0 :   \
2318
658
                                merge_getmem(MS, NEED))
2319
2320
/* Merge the na elements starting at ssa with the nb elements starting at
2321
 * ssb.keys = ssa.keys + na in a stable way, in-place.  na and nb must be > 0.
2322
 * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
2323
 * should have na <= nb.  See listsort.txt for more info.  Return 0 if
2324
 * successful, -1 if error.
2325
 */
2326
static Py_ssize_t
2327
merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na,
2328
         sortslice ssb, Py_ssize_t nb)
2329
397
{
2330
397
    Py_ssize_t k;
2331
397
    sortslice dest;
2332
397
    int result = -1;            /* guilty until proved innocent */
2333
397
    Py_ssize_t min_gallop;
2334
2335
397
    assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
2336
397
    assert(ssa.keys + na == ssb.keys);
2337
397
    if (MERGE_GETMEM(ms, na) < 0)
2338
0
        return -1;
2339
397
    sortslice_memcpy(&ms->a, 0, &ssa, 0, na);
2340
397
    dest = ssa;
2341
397
    ssa = ms->a;
2342
2343
397
    sortslice_copy_incr(&dest, &ssb);
2344
397
    --nb;
2345
397
    if (nb == 0)
2346
0
        goto Succeed;
2347
397
    if (na == 1)
2348
0
        goto CopyB;
2349
2350
397
    min_gallop = ms->min_gallop;
2351
1.07k
    for (;;) {
2352
1.07k
        Py_ssize_t acount = 0;          /* # of times A won in a row */
2353
1.07k
        Py_ssize_t bcount = 0;          /* # of times B won in a row */
2354
2355
        /* Do the straightforward thing until (if ever) one run
2356
         * appears to win consistently.
2357
         */
2358
26.6k
        for (;;) {
2359
26.6k
            assert(na > 1 && nb > 0);
2360
26.6k
            k = ISLT(ssb.keys[0], ssa.keys[0]);
2361
26.6k
            if (k) {
2362
12.7k
                if (k < 0)
2363
0
                    goto Fail;
2364
12.7k
                sortslice_copy_incr(&dest, &ssb);
2365
12.7k
                ++bcount;
2366
12.7k
                acount = 0;
2367
12.7k
                --nb;
2368
12.7k
                if (nb == 0)
2369
119
                    goto Succeed;
2370
12.5k
                if (bcount >= min_gallop)
2371
333
                    break;
2372
12.5k
            }
2373
13.8k
            else {
2374
13.8k
                sortslice_copy_incr(&dest, &ssa);
2375
13.8k
                ++acount;
2376
13.8k
                bcount = 0;
2377
13.8k
                --na;
2378
13.8k
                if (na == 1)
2379
218
                    goto CopyB;
2380
13.6k
                if (acount >= min_gallop)
2381
407
                    break;
2382
13.6k
            }
2383
26.6k
        }
2384
2385
        /* One run is winning so consistently that galloping may
2386
         * be a huge win.  So try that, and continue galloping until
2387
         * (if ever) neither run appears to be winning consistently
2388
         * anymore.
2389
         */
2390
740
        ++min_gallop;
2391
1.01k
        do {
2392
1.01k
            assert(na > 1 && nb > 0);
2393
1.01k
            min_gallop -= min_gallop > 1;
2394
1.01k
            ms->min_gallop = min_gallop;
2395
1.01k
            k = gallop_right(ms, ssb.keys[0], ssa.keys, na, 0);
2396
1.01k
            acount = k;
2397
1.01k
            if (k) {
2398
591
                if (k < 0)
2399
0
                    goto Fail;
2400
591
                sortslice_memcpy(&dest, 0, &ssa, 0, k);
2401
591
                sortslice_advance(&dest, k);
2402
591
                sortslice_advance(&ssa, k);
2403
591
                na -= k;
2404
591
                if (na == 1)
2405
24
                    goto CopyB;
2406
                /* na==0 is impossible now if the comparison
2407
                 * function is consistent, but we can't assume
2408
                 * that it is.
2409
                 */
2410
567
                if (na == 0)
2411
0
                    goto Succeed;
2412
567
            }
2413
987
            sortslice_copy_incr(&dest, &ssb);
2414
987
            --nb;
2415
987
            if (nb == 0)
2416
22
                goto Succeed;
2417
2418
965
            k = gallop_left(ms, ssa.keys[0], ssb.keys, nb, 0);
2419
965
            bcount = k;
2420
965
            if (k) {
2421
607
                if (k < 0)
2422
0
                    goto Fail;
2423
607
                sortslice_memmove(&dest, 0, &ssb, 0, k);
2424
607
                sortslice_advance(&dest, k);
2425
607
                sortslice_advance(&ssb, k);
2426
607
                nb -= k;
2427
607
                if (nb == 0)
2428
0
                    goto Succeed;
2429
607
            }
2430
965
            sortslice_copy_incr(&dest, &ssa);
2431
965
            --na;
2432
965
            if (na == 1)
2433
14
                goto CopyB;
2434
965
        } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
2435
680
        ++min_gallop;           /* penalize it for leaving galloping mode */
2436
680
        ms->min_gallop = min_gallop;
2437
680
    }
2438
141
Succeed:
2439
141
    result = 0;
2440
141
Fail:
2441
141
    if (na)
2442
141
        sortslice_memcpy(&dest, 0, &ssa, 0, na);
2443
141
    return result;
2444
256
CopyB:
2445
256
    assert(na == 1 && nb > 0);
2446
    /* The last element of ssa belongs at the end of the merge. */
2447
256
    sortslice_memmove(&dest, 0, &ssb, 0, nb);
2448
256
    sortslice_copy(&dest, nb, &ssa, 0);
2449
256
    return 0;
2450
256
}
2451
2452
/* Merge the na elements starting at pa with the nb elements starting at
2453
 * ssb.keys = ssa.keys + na in a stable way, in-place.  na and nb must be > 0.
2454
 * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
2455
 * should have na >= nb.  See listsort.txt for more info.  Return 0 if
2456
 * successful, -1 if error.
2457
 */
2458
static Py_ssize_t
2459
merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na,
2460
         sortslice ssb, Py_ssize_t nb)
2461
261
{
2462
261
    Py_ssize_t k;
2463
261
    sortslice dest, basea, baseb;
2464
261
    int result = -1;            /* guilty until proved innocent */
2465
261
    Py_ssize_t min_gallop;
2466
2467
261
    assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
2468
261
    assert(ssa.keys + na == ssb.keys);
2469
261
    if (MERGE_GETMEM(ms, nb) < 0)
2470
0
        return -1;
2471
261
    dest = ssb;
2472
261
    sortslice_advance(&dest, nb-1);
2473
261
    sortslice_memcpy(&ms->a, 0, &ssb, 0, nb);
2474
261
    basea = ssa;
2475
261
    baseb = ms->a;
2476
261
    ssb.keys = ms->a.keys + nb - 1;
2477
261
    if (ssb.values != NULL)
2478
0
        ssb.values = ms->a.values + nb - 1;
2479
261
    sortslice_advance(&ssa, na - 1);
2480
2481
261
    sortslice_copy_decr(&dest, &ssa);
2482
261
    --na;
2483
261
    if (na == 0)
2484
0
        goto Succeed;
2485
261
    if (nb == 1)
2486
0
        goto CopyA;
2487
2488
261
    min_gallop = ms->min_gallop;
2489
661
    for (;;) {
2490
661
        Py_ssize_t acount = 0;          /* # of times A won in a row */
2491
661
        Py_ssize_t bcount = 0;          /* # of times B won in a row */
2492
2493
        /* Do the straightforward thing until (if ever) one run
2494
         * appears to win consistently.
2495
         */
2496
17.4k
        for (;;) {
2497
17.4k
            assert(na > 0 && nb > 1);
2498
17.4k
            k = ISLT(ssb.keys[0], ssa.keys[0]);
2499
17.4k
            if (k) {
2500
9.82k
                if (k < 0)
2501
0
                    goto Fail;
2502
9.82k
                sortslice_copy_decr(&dest, &ssa);
2503
9.82k
                ++acount;
2504
9.82k
                bcount = 0;
2505
9.82k
                --na;
2506
9.82k
                if (na == 0)
2507
179
                    goto Succeed;
2508
9.64k
                if (acount >= min_gallop)
2509
309
                    break;
2510
9.64k
            }
2511
7.65k
            else {
2512
7.65k
                sortslice_copy_decr(&dest, &ssb);
2513
7.65k
                ++bcount;
2514
7.65k
                acount = 0;
2515
7.65k
                --nb;
2516
7.65k
                if (nb == 1)
2517
28
                    goto CopyA;
2518
7.62k
                if (bcount >= min_gallop)
2519
145
                    break;
2520
7.62k
            }
2521
17.4k
        }
2522
2523
        /* One run is winning so consistently that galloping may
2524
         * be a huge win.  So try that, and continue galloping until
2525
         * (if ever) neither run appears to be winning consistently
2526
         * anymore.
2527
         */
2528
454
        ++min_gallop;
2529
712
        do {
2530
712
            assert(na > 0 && nb > 1);
2531
712
            min_gallop -= min_gallop > 1;
2532
712
            ms->min_gallop = min_gallop;
2533
712
            k = gallop_right(ms, ssb.keys[0], basea.keys, na, na-1);
2534
712
            if (k < 0)
2535
0
                goto Fail;
2536
712
            k = na - k;
2537
712
            acount = k;
2538
712
            if (k) {
2539
436
                sortslice_advance(&dest, -k);
2540
436
                sortslice_advance(&ssa, -k);
2541
436
                sortslice_memmove(&dest, 1, &ssa, 1, k);
2542
436
                na -= k;
2543
436
                if (na == 0)
2544
38
                    goto Succeed;
2545
436
            }
2546
674
            sortslice_copy_decr(&dest, &ssb);
2547
674
            --nb;
2548
674
            if (nb == 1)
2549
0
                goto CopyA;
2550
2551
674
            k = gallop_left(ms, ssa.keys[0], baseb.keys, nb, nb-1);
2552
674
            if (k < 0)
2553
0
                goto Fail;
2554
674
            k = nb - k;
2555
674
            bcount = k;
2556
674
            if (k) {
2557
426
                sortslice_advance(&dest, -k);
2558
426
                sortslice_advance(&ssb, -k);
2559
426
                sortslice_memcpy(&dest, 1, &ssb, 1, k);
2560
426
                nb -= k;
2561
426
                if (nb == 1)
2562
0
                    goto CopyA;
2563
                /* nb==0 is impossible now if the comparison
2564
                 * function is consistent, but we can't assume
2565
                 * that it is.
2566
                 */
2567
426
                if (nb == 0)
2568
0
                    goto Succeed;
2569
426
            }
2570
674
            sortslice_copy_decr(&dest, &ssa);
2571
674
            --na;
2572
674
            if (na == 0)
2573
16
                goto Succeed;
2574
674
        } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
2575
400
        ++min_gallop;           /* penalize it for leaving galloping mode */
2576
400
        ms->min_gallop = min_gallop;
2577
400
    }
2578
233
Succeed:
2579
233
    result = 0;
2580
233
Fail:
2581
233
    if (nb)
2582
233
        sortslice_memcpy(&dest, -(nb-1), &baseb, 0, nb);
2583
233
    return result;
2584
28
CopyA:
2585
28
    assert(nb == 1 && na > 0);
2586
    /* The first element of ssb belongs at the front of the merge. */
2587
28
    sortslice_memmove(&dest, 1-na, &ssa, 1-na, na);
2588
28
    sortslice_advance(&dest, -na);
2589
28
    sortslice_advance(&ssa, -na);
2590
28
    sortslice_copy(&dest, 0, &ssb, 0);
2591
28
    return 0;
2592
28
}
2593
2594
/* Merge the two runs at stack indices i and i+1.
2595
 * Returns 0 on success, -1 on error.
2596
 */
2597
static Py_ssize_t
2598
merge_at(MergeState *ms, Py_ssize_t i)
2599
658
{
2600
658
    sortslice ssa, ssb;
2601
658
    Py_ssize_t na, nb;
2602
658
    Py_ssize_t k;
2603
2604
658
    assert(ms != NULL);
2605
658
    assert(ms->n >= 2);
2606
658
    assert(i >= 0);
2607
658
    assert(i == ms->n - 2 || i == ms->n - 3);
2608
2609
658
    ssa = ms->pending[i].base;
2610
658
    na = ms->pending[i].len;
2611
658
    ssb = ms->pending[i+1].base;
2612
658
    nb = ms->pending[i+1].len;
2613
658
    assert(na > 0 && nb > 0);
2614
658
    assert(ssa.keys + na == ssb.keys);
2615
2616
    /* Record the length of the combined runs; if i is the 3rd-last
2617
     * run now, also slide over the last run (which isn't involved
2618
     * in this merge).  The current run i+1 goes away in any case.
2619
     */
2620
658
    ms->pending[i].len = na + nb;
2621
658
    if (i == ms->n - 3)
2622
0
        ms->pending[i+1] = ms->pending[i+2];
2623
658
    --ms->n;
2624
2625
    /* Where does b start in a?  Elements in a before that can be
2626
     * ignored (already in place).
2627
     */
2628
658
    k = gallop_right(ms, *ssb.keys, ssa.keys, na, 0);
2629
658
    if (k < 0)
2630
0
        return -1;
2631
658
    sortslice_advance(&ssa, k);
2632
658
    na -= k;
2633
658
    if (na == 0)
2634
0
        return 0;
2635
2636
    /* Where does a end in b?  Elements in b after that can be
2637
     * ignored (already in place).
2638
     */
2639
658
    nb = gallop_left(ms, ssa.keys[na-1], ssb.keys, nb, nb-1);
2640
658
    if (nb <= 0)
2641
0
        return nb;
2642
2643
    /* Merge what remains of the runs, using a temp array with
2644
     * min(na, nb) elements.
2645
     */
2646
658
    if (na <= nb)
2647
397
        return merge_lo(ms, ssa, na, ssb, nb);
2648
261
    else
2649
261
        return merge_hi(ms, ssa, na, ssb, nb);
2650
658
}
2651
2652
/* Two adjacent runs begin at index s1. The first run has length n1, and
2653
 * the second run (starting at index s1+n1) has length n2. The list has total
2654
 * length n.
2655
 * Compute the "power" of the first run. See listsort.txt for details.
2656
 */
2657
static int
2658
powerloop(Py_ssize_t s1, Py_ssize_t n1, Py_ssize_t n2, Py_ssize_t n)
2659
658
{
2660
658
    int result = 0;
2661
658
    assert(s1 >= 0);
2662
658
    assert(n1 > 0 && n2 > 0);
2663
658
    assert(s1 + n1 + n2 <= n);
2664
    /* midpoints a and b:
2665
     * a = s1 + n1/2
2666
     * b = s1 + n1 + n2/2 = a + (n1 + n2)/2
2667
     *
2668
     * Those may not be integers, though, because of the "/2". So we work with
2669
     * 2*a and 2*b instead, which are necessarily integers. It makes no
2670
     * difference to the outcome, since the bits in the expansion of (2*i)/n
2671
     * are merely shifted one position from those of i/n.
2672
     */
2673
658
    Py_ssize_t a = 2 * s1 + n1;  /* 2*a */
2674
658
    Py_ssize_t b = a + n1 + n2;  /* 2*b */
2675
    /* Emulate a/n and b/n one bit a time, until bits differ. */
2676
978
    for (;;) {
2677
978
        ++result;
2678
978
        if (a >= n) {  /* both quotient bits are 1 */
2679
160
            assert(b >= a);
2680
160
            a -= n;
2681
160
            b -= n;
2682
160
        }
2683
818
        else if (b >= n) {  /* a/n bit is 0, b/n bit is 1 */
2684
658
            break;
2685
658
        } /* else both quotient bits are 0 */
2686
978
        assert(a < b && b < n);
2687
320
        a <<= 1;
2688
320
        b <<= 1;
2689
320
    }
2690
658
    return result;
2691
658
}
2692
2693
/* The next run has been identified, of length n2.
2694
 * If there's already a run on the stack, apply the "powersort" merge strategy:
2695
 * compute the topmost run's "power" (depth in a conceptual binary merge tree)
2696
 * and merge adjacent runs on the stack with greater power. See listsort.txt
2697
 * for more info.
2698
 *
2699
 * It's the caller's responsibility to push the new run on the stack when this
2700
 * returns.
2701
 *
2702
 * Returns 0 on success, -1 on error.
2703
 */
2704
static int
2705
found_new_run(MergeState *ms, Py_ssize_t n2)
2706
50.0k
{
2707
50.0k
    assert(ms);
2708
50.0k
    if (ms->n) {
2709
658
        assert(ms->n > 0);
2710
658
        struct s_slice *p = ms->pending;
2711
658
        Py_ssize_t s1 = p[ms->n - 1].base.keys - ms->basekeys; /* start index */
2712
658
        Py_ssize_t n1 = p[ms->n - 1].len;
2713
658
        int power = powerloop(s1, n1, n2, ms->listlen);
2714
789
        while (ms->n > 1 && p[ms->n - 2].power > power) {
2715
131
            if (merge_at(ms, ms->n - 2) < 0)
2716
0
                return -1;
2717
131
        }
2718
658
        assert(ms->n < 2 || p[ms->n - 2].power < power);
2719
658
        p[ms->n - 1].power = power;
2720
658
    }
2721
50.0k
    return 0;
2722
50.0k
}
2723
2724
/* Regardless of invariants, merge all runs on the stack until only one
2725
 * remains.  This is used at the end of the mergesort.
2726
 *
2727
 * Returns 0 on success, -1 on error.
2728
 */
2729
static int
2730
merge_force_collapse(MergeState *ms)
2731
49.4k
{
2732
49.4k
    struct s_slice *p = ms->pending;
2733
2734
49.4k
    assert(ms);
2735
49.9k
    while (ms->n > 1) {
2736
527
        Py_ssize_t n = ms->n - 2;
2737
527
        if (n > 0 && p[n-1].len < p[n+1].len)
2738
0
            --n;
2739
527
        if (merge_at(ms, n) < 0)
2740
0
            return -1;
2741
527
    }
2742
49.4k
    return 0;
2743
49.4k
}
2744
2745
/* Return the next minrun value to use. See listsort.txt. */
2746
Py_LOCAL_INLINE(Py_ssize_t)
2747
minrun_next(MergeState *ms)
2748
50.0k
{
2749
50.0k
    ms->mr_current += ms->listlen;
2750
50.0k
    assert(ms->mr_current >= 0); /* no overflow */
2751
50.0k
    Py_ssize_t result = ms->mr_current >> ms->mr_e;
2752
50.0k
    ms->mr_current &= ms->mr_mask;
2753
50.0k
    return result;
2754
50.0k
}
2755
2756
/* Here we define custom comparison functions to optimize for the cases one commonly
2757
 * encounters in practice: homogeneous lists, often of one of the basic types. */
2758
2759
/* This struct holds the comparison function and helper functions
2760
 * selected in the pre-sort check. */
2761
2762
/* These are the special case compare functions.
2763
 * ms->key_compare will always point to one of these: */
2764
2765
/* Heterogeneous compare: default, always safe to fall back on. */
2766
static int
2767
safe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
2768
0
{
2769
    /* No assumptions necessary! */
2770
0
    return PyObject_RichCompareBool(v, w, Py_LT);
2771
0
}
2772
2773
/* Homogeneous compare: safe for any two comparable objects of the same type.
2774
 * (ms->key_richcompare is set to ob_type->tp_richcompare in the
2775
 *  pre-sort check.)
2776
 */
2777
static int
2778
unsafe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
2779
214k
{
2780
214k
    PyObject *res_obj; int res;
2781
2782
    /* No assumptions, because we check first: */
2783
214k
    if (Py_TYPE(v)->tp_richcompare != ms->key_richcompare)
2784
0
        return PyObject_RichCompareBool(v, w, Py_LT);
2785
2786
214k
    assert(ms->key_richcompare != NULL);
2787
214k
    res_obj = (*(ms->key_richcompare))(v, w, Py_LT);
2788
2789
214k
    if (res_obj == Py_NotImplemented) {
2790
0
        Py_DECREF(res_obj);
2791
0
        return PyObject_RichCompareBool(v, w, Py_LT);
2792
0
    }
2793
214k
    if (res_obj == NULL)
2794
0
        return -1;
2795
2796
214k
    if (PyBool_Check(res_obj)) {
2797
214k
        res = (res_obj == Py_True);
2798
214k
        assert(_Py_IsImmortal(res_obj));
2799
214k
    }
2800
0
    else {
2801
0
        res = PyObject_IsTrue(res_obj);
2802
0
        Py_DECREF(res_obj);
2803
0
    }
2804
2805
    /* Note that we can't assert
2806
     *     res == PyObject_RichCompareBool(v, w, Py_LT);
2807
     * because of evil compare functions like this:
2808
     *     lambda a, b:  int(random.random() * 3) - 1)
2809
     * (which is actually in test_sort.py) */
2810
214k
    return res;
2811
214k
}
2812
2813
/* Latin string compare: safe for any two latin (one byte per char) strings. */
2814
static int
2815
unsafe_latin_compare(PyObject *v, PyObject *w, MergeState *ms)
2816
304k
{
2817
304k
    Py_ssize_t len;
2818
304k
    int res;
2819
2820
    /* Modified from Objects/unicodeobject.c:unicode_compare, assuming: */
2821
304k
    assert(Py_IS_TYPE(v, &PyUnicode_Type));
2822
304k
    assert(Py_IS_TYPE(w, &PyUnicode_Type));
2823
304k
    assert(PyUnicode_KIND(v) == PyUnicode_KIND(w));
2824
304k
    assert(PyUnicode_KIND(v) == PyUnicode_1BYTE_KIND);
2825
2826
304k
    len = Py_MIN(PyUnicode_GET_LENGTH(v), PyUnicode_GET_LENGTH(w));
2827
304k
    res = memcmp(PyUnicode_DATA(v), PyUnicode_DATA(w), len);
2828
2829
304k
    res = (res != 0 ?
2830
283k
           res < 0 :
2831
304k
           PyUnicode_GET_LENGTH(v) < PyUnicode_GET_LENGTH(w));
2832
2833
304k
    assert(res == PyObject_RichCompareBool(v, w, Py_LT));;
2834
304k
    return res;
2835
304k
}
2836
2837
/* Bounded int compare: compare any two longs that fit in a single machine word. */
2838
static int
2839
unsafe_long_compare(PyObject *v, PyObject *w, MergeState *ms)
2840
58.7k
{
2841
58.7k
    PyLongObject *vl, *wl;
2842
58.7k
    intptr_t v0, w0;
2843
58.7k
    int res;
2844
2845
    /* Modified from Objects/longobject.c:long_compare, assuming: */
2846
58.7k
    assert(Py_IS_TYPE(v, &PyLong_Type));
2847
58.7k
    assert(Py_IS_TYPE(w, &PyLong_Type));
2848
58.7k
    assert(_PyLong_IsCompact((PyLongObject *)v));
2849
58.7k
    assert(_PyLong_IsCompact((PyLongObject *)w));
2850
2851
58.7k
    vl = (PyLongObject*)v;
2852
58.7k
    wl = (PyLongObject*)w;
2853
2854
58.7k
    v0 = _PyLong_CompactValue(vl);
2855
58.7k
    w0 = _PyLong_CompactValue(wl);
2856
2857
58.7k
    res = v0 < w0;
2858
58.7k
    assert(res == PyObject_RichCompareBool(v, w, Py_LT));
2859
58.7k
    return res;
2860
58.7k
}
2861
2862
/* Float compare: compare any two floats. */
2863
static int
2864
unsafe_float_compare(PyObject *v, PyObject *w, MergeState *ms)
2865
0
{
2866
0
    int res;
2867
2868
    /* Modified from Objects/floatobject.c:float_richcompare, assuming: */
2869
0
    assert(Py_IS_TYPE(v, &PyFloat_Type));
2870
0
    assert(Py_IS_TYPE(w, &PyFloat_Type));
2871
2872
0
    res = PyFloat_AS_DOUBLE(v) < PyFloat_AS_DOUBLE(w);
2873
0
    assert(res == PyObject_RichCompareBool(v, w, Py_LT));
2874
0
    return res;
2875
0
}
2876
2877
/* Tuple compare: compare *any* two tuples, using
2878
 * ms->tuple_elem_compare to compare the first elements, which is set
2879
 * using the same pre-sort check as we use for ms->key_compare,
2880
 * but run on the list [x[0] for x in L]. This allows us to optimize compares
2881
 * on two levels (as long as [x[0] for x in L] is type-homogeneous.) The idea is
2882
 * that most tuple compares don't involve x[1:]. */
2883
static int
2884
unsafe_tuple_compare(PyObject *v, PyObject *w, MergeState *ms)
2885
732
{
2886
732
    PyTupleObject *vt, *wt;
2887
732
    Py_ssize_t i, vlen, wlen;
2888
732
    int k;
2889
2890
    /* Modified from Objects/tupleobject.c:tuplerichcompare, assuming: */
2891
732
    assert(Py_IS_TYPE(v, &PyTuple_Type));
2892
732
    assert(Py_IS_TYPE(w, &PyTuple_Type));
2893
732
    assert(Py_SIZE(v) > 0);
2894
732
    assert(Py_SIZE(w) > 0);
2895
2896
732
    vt = (PyTupleObject *)v;
2897
732
    wt = (PyTupleObject *)w;
2898
2899
732
    vlen = Py_SIZE(vt);
2900
732
    wlen = Py_SIZE(wt);
2901
2902
732
    for (i = 0; i < vlen && i < wlen; i++) {
2903
732
        k = PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_EQ);
2904
732
        if (k < 0)
2905
0
            return -1;
2906
732
        if (!k)
2907
732
            break;
2908
732
    }
2909
2910
732
    if (i >= vlen || i >= wlen)
2911
0
        return vlen < wlen;
2912
2913
732
    if (i == 0)
2914
732
        return ms->tuple_elem_compare(vt->ob_item[i], wt->ob_item[i], ms);
2915
0
    else
2916
0
        return PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_LT);
2917
732
}
2918
2919
/* An adaptive, stable, natural mergesort.  See listsort.txt.
2920
 * Returns Py_None on success, NULL on error.  Even in case of error, the
2921
 * list will be some permutation of its input state (nothing is lost or
2922
 * duplicated).
2923
 */
2924
/*[clinic input]
2925
@permit_long_docstring_body
2926
@critical_section
2927
list.sort
2928
2929
    *
2930
    key as keyfunc: object = None
2931
    reverse: bool = False
2932
2933
Sort the list in ascending order and return None.
2934
2935
The sort is in-place (i.e. the list itself is modified) and stable (i.e. the
2936
order of two equal elements is maintained).
2937
2938
If a key function is given, apply it once to each list item and sort them,
2939
ascending or descending, according to their function values.
2940
2941
The reverse flag can be set to sort in descending order.
2942
[clinic start generated code]*/
2943
2944
static PyObject *
2945
list_sort_impl(PyListObject *self, PyObject *keyfunc, int reverse)
2946
/*[clinic end generated code: output=57b9f9c5e23fbe42 input=e4f6b6069181ad7d]*/
2947
71.3k
{
2948
71.3k
    MergeState ms;
2949
71.3k
    Py_ssize_t nremaining;
2950
71.3k
    Py_ssize_t minrun;
2951
71.3k
    sortslice lo;
2952
71.3k
    Py_ssize_t saved_ob_size, saved_allocated;
2953
71.3k
    PyObject **saved_ob_item;
2954
71.3k
    PyObject **final_ob_item;
2955
71.3k
    PyObject *result = NULL;            /* guilty until proved innocent */
2956
71.3k
    Py_ssize_t i;
2957
71.3k
    PyObject **keys;
2958
2959
71.3k
    assert(self != NULL);
2960
71.3k
    assert(PyList_Check(self));
2961
71.3k
    if (keyfunc == Py_None)
2962
23.2k
        keyfunc = NULL;
2963
2964
    /* The list is temporarily made empty, so that mutations performed
2965
     * by comparison functions can't affect the slice of memory we're
2966
     * sorting (allowing mutations during sorting is a core-dump
2967
     * factory, since ob_item may change).
2968
     */
2969
71.3k
    saved_ob_size = Py_SIZE(self);
2970
71.3k
    saved_ob_item = self->ob_item;
2971
71.3k
    saved_allocated = self->allocated;
2972
71.3k
    Py_SET_SIZE(self, 0);
2973
71.3k
    FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item, NULL);
2974
71.3k
    self->allocated = -1; /* any operation will reset it to >= 0 */
2975
2976
71.3k
    if (keyfunc == NULL) {
2977
71.3k
        keys = NULL;
2978
71.3k
        lo.keys = saved_ob_item;
2979
71.3k
        lo.values = NULL;
2980
71.3k
    }
2981
10
    else {
2982
10
        if (saved_ob_size < MERGESTATE_TEMP_SIZE/2)
2983
            /* Leverage stack space we allocated but won't otherwise use */
2984
10
            keys = &ms.temparray[saved_ob_size+1];
2985
0
        else {
2986
0
            keys = PyMem_Malloc(sizeof(PyObject *) * saved_ob_size);
2987
0
            if (keys == NULL) {
2988
0
                PyErr_NoMemory();
2989
0
                goto keyfunc_fail;
2990
0
            }
2991
0
        }
2992
2993
31
        for (i = 0; i < saved_ob_size ; i++) {
2994
21
            keys[i] = PyObject_CallOneArg(keyfunc, saved_ob_item[i]);
2995
21
            if (keys[i] == NULL) {
2996
0
                for (i=i-1 ; i>=0 ; i--)
2997
0
                    Py_DECREF(keys[i]);
2998
0
                if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2)
2999
0
                    PyMem_Free(keys);
3000
0
                goto keyfunc_fail;
3001
0
            }
3002
21
        }
3003
3004
10
        lo.keys = keys;
3005
10
        lo.values = saved_ob_item;
3006
10
    }
3007
3008
3009
    /* The pre-sort check: here's where we decide which compare function to use.
3010
     * How much optimization is safe? We test for homogeneity with respect to
3011
     * several properties that are expensive to check at compare-time, and
3012
     * set ms appropriately. */
3013
71.3k
    if (saved_ob_size > 1) {
3014
        /* Assume the first element is representative of the whole list. */
3015
49.4k
        int keys_are_in_tuples = (Py_IS_TYPE(lo.keys[0], &PyTuple_Type) &&
3016
4
                                  Py_SIZE(lo.keys[0]) > 0);
3017
3018
49.4k
        PyTypeObject* key_type = (keys_are_in_tuples ?
3019
4
                                  Py_TYPE(PyTuple_GET_ITEM(lo.keys[0], 0)) :
3020
49.4k
                                  Py_TYPE(lo.keys[0]));
3021
3022
49.4k
        int keys_are_all_same_type = 1;
3023
49.4k
        int strings_are_latin = 1;
3024
49.4k
        int ints_are_bounded = 1;
3025
3026
        /* Prove that assumption by checking every key. */
3027
287k
        for (i=0; i < saved_ob_size; i++) {
3028
3029
238k
            if (keys_are_in_tuples &&
3030
176
                !(Py_IS_TYPE(lo.keys[i], &PyTuple_Type) && Py_SIZE(lo.keys[i]) != 0)) {
3031
0
                keys_are_in_tuples = 0;
3032
0
                keys_are_all_same_type = 0;
3033
0
                break;
3034
0
            }
3035
3036
            /* Note: for lists of tuples, key is the first element of the tuple
3037
             * lo.keys[i], not lo.keys[i] itself! We verify type-homogeneity
3038
             * for lists of tuples in the if-statement directly above. */
3039
238k
            PyObject *key = (keys_are_in_tuples ?
3040
176
                             PyTuple_GET_ITEM(lo.keys[i], 0) :
3041
238k
                             lo.keys[i]);
3042
3043
238k
            if (!Py_IS_TYPE(key, key_type)) {
3044
0
                keys_are_all_same_type = 0;
3045
                /* If keys are in tuple we must loop over the whole list to make
3046
                   sure all items are tuples */
3047
0
                if (!keys_are_in_tuples) {
3048
0
                    break;
3049
0
                }
3050
0
            }
3051
3052
238k
            if (keys_are_all_same_type) {
3053
238k
                if (key_type == &PyLong_Type &&
3054
54.2k
                    ints_are_bounded &&
3055
54.2k
                    !_PyLong_IsCompact((PyLongObject *)key)) {
3056
3057
0
                    ints_are_bounded = 0;
3058
0
                }
3059
238k
                else if (key_type == &PyUnicode_Type &&
3060
184k
                         strings_are_latin &&
3061
244k
                         PyUnicode_KIND(key) != PyUnicode_1BYTE_KIND) {
3062
3063
13.5k
                        strings_are_latin = 0;
3064
13.5k
                    }
3065
238k
                }
3066
238k
            }
3067
3068
        /* Choose the best compare, given what we now know about the keys. */
3069
49.4k
        if (keys_are_all_same_type) {
3070
3071
49.4k
            if (key_type == &PyUnicode_Type && strings_are_latin) {
3072
13.7k
                ms.key_compare = unsafe_latin_compare;
3073
13.7k
            }
3074
35.6k
            else if (key_type == &PyLong_Type && ints_are_bounded) {
3075
22.1k
                ms.key_compare = unsafe_long_compare;
3076
22.1k
            }
3077
13.5k
            else if (key_type == &PyFloat_Type) {
3078
0
                ms.key_compare = unsafe_float_compare;
3079
0
            }
3080
13.5k
            else if ((ms.key_richcompare = key_type->tp_richcompare) != NULL) {
3081
13.5k
                ms.key_compare = unsafe_object_compare;
3082
13.5k
            }
3083
0
            else {
3084
0
                ms.key_compare = safe_object_compare;
3085
0
            }
3086
49.4k
        }
3087
0
        else {
3088
0
            ms.key_compare = safe_object_compare;
3089
0
        }
3090
3091
49.4k
        if (keys_are_in_tuples) {
3092
            /* Make sure we're not dealing with tuples of tuples
3093
             * (remember: here, key_type refers list [key[0] for key in keys]) */
3094
4
            if (key_type == &PyTuple_Type) {
3095
0
                ms.tuple_elem_compare = safe_object_compare;
3096
0
            }
3097
4
            else {
3098
4
                ms.tuple_elem_compare = ms.key_compare;
3099
4
            }
3100
3101
4
            ms.key_compare = unsafe_tuple_compare;
3102
4
        }
3103
49.4k
    }
3104
    /* End of pre-sort check: ms is now set properly! */
3105
3106
71.3k
    merge_init(&ms, saved_ob_size, keys != NULL, &lo);
3107
3108
71.3k
    nremaining = saved_ob_size;
3109
71.3k
    if (nremaining < 2)
3110
21.9k
        goto succeed;
3111
3112
    /* Reverse sort stability achieved by initially reversing the list,
3113
    applying a stable forward sort, then reversing the final result. */
3114
49.4k
    if (reverse) {
3115
3
        if (keys != NULL)
3116
0
            reverse_slice(&keys[0], &keys[saved_ob_size]);
3117
3
        reverse_slice(&saved_ob_item[0], &saved_ob_item[saved_ob_size]);
3118
3
    }
3119
3120
    /* March over the array once, left to right, finding natural runs,
3121
     * and extending short natural runs to minrun elements.
3122
     */
3123
50.0k
    do {
3124
50.0k
        Py_ssize_t n;
3125
3126
        /* Identify next run. */
3127
50.0k
        n = count_run(&ms, &lo, nremaining);
3128
50.0k
        if (n < 0)
3129
0
            goto fail;
3130
        /* If short, extend to min(minrun, nremaining). */
3131
50.0k
        minrun = minrun_next(&ms);
3132
50.0k
        if (n < minrun) {
3133
19.9k
            const Py_ssize_t force = nremaining <= minrun ?
3134
19.2k
                              nremaining : minrun;
3135
19.9k
            if (binarysort(&ms, &lo, force, n) < 0)
3136
0
                goto fail;
3137
19.9k
            n = force;
3138
19.9k
        }
3139
        /* Maybe merge pending runs. */
3140
50.0k
        assert(ms.n == 0 || ms.pending[ms.n -1].base.keys +
3141
50.0k
                            ms.pending[ms.n-1].len == lo.keys);
3142
50.0k
        if (found_new_run(&ms, n) < 0)
3143
0
            goto fail;
3144
        /* Push new run on stack. */
3145
50.0k
        assert(ms.n < MAX_MERGE_PENDING);
3146
50.0k
        ms.pending[ms.n].base = lo;
3147
50.0k
        ms.pending[ms.n].len = n;
3148
50.0k
        ++ms.n;
3149
        /* Advance to find next run. */
3150
50.0k
        sortslice_advance(&lo, n);
3151
50.0k
        nremaining -= n;
3152
50.0k
    } while (nremaining);
3153
3154
49.4k
    if (merge_force_collapse(&ms) < 0)
3155
0
        goto fail;
3156
49.4k
    assert(ms.n == 1);
3157
49.4k
    assert(keys == NULL
3158
49.4k
           ? ms.pending[0].base.keys == saved_ob_item
3159
49.4k
           : ms.pending[0].base.keys == &keys[0]);
3160
49.4k
    assert(ms.pending[0].len == saved_ob_size);
3161
49.4k
    lo = ms.pending[0].base;
3162
3163
71.3k
succeed:
3164
71.3k
    result = Py_None;
3165
71.3k
fail:
3166
71.3k
    if (keys != NULL) {
3167
31
        for (i = 0; i < saved_ob_size; i++)
3168
21
            Py_DECREF(keys[i]);
3169
10
        if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2)
3170
0
            PyMem_Free(keys);
3171
10
    }
3172
3173
71.3k
    if (self->allocated != -1 && result != NULL) {
3174
        /* The user mucked with the list during the sort,
3175
         * and we don't already have another error to report.
3176
         */
3177
0
        PyErr_SetString(PyExc_ValueError, "list modified during sort");
3178
0
        result = NULL;
3179
0
    }
3180
3181
71.3k
    if (reverse && saved_ob_size > 1)
3182
3
        reverse_slice(saved_ob_item, saved_ob_item + saved_ob_size);
3183
3184
71.3k
    merge_freemem(&ms);
3185
3186
71.3k
keyfunc_fail:
3187
71.3k
    final_ob_item = self->ob_item;
3188
71.3k
    i = Py_SIZE(self);
3189
71.3k
    Py_SET_SIZE(self, saved_ob_size);
3190
71.3k
    FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item, saved_ob_item);
3191
71.3k
    FT_ATOMIC_STORE_SSIZE_RELAXED(self->allocated, saved_allocated);
3192
71.3k
    if (final_ob_item != NULL) {
3193
        /* we cannot use list_clear() for this because it does not
3194
           guarantee that the list is really empty when it returns */
3195
0
        while (--i >= 0) {
3196
0
            Py_XDECREF(final_ob_item[i]);
3197
0
        }
3198
#ifdef Py_GIL_DISABLED
3199
        ensure_shared_on_resize(self);
3200
        bool use_qsbr = _PyObject_GC_IS_SHARED(self);
3201
#else
3202
0
        bool use_qsbr = false;
3203
0
#endif
3204
0
        free_list_items(final_ob_item, use_qsbr);
3205
0
    }
3206
71.3k
    return Py_XNewRef(result);
3207
71.3k
}
3208
#undef IFLT
3209
#undef ISLT
3210
3211
int
3212
PyList_Sort(PyObject *v)
3213
48.1k
{
3214
48.1k
    if (v == NULL || !PyList_Check(v)) {
3215
0
        PyErr_BadInternalCall();
3216
0
        return -1;
3217
0
    }
3218
48.1k
    Py_BEGIN_CRITICAL_SECTION(v);
3219
48.1k
    v = list_sort_impl((PyListObject *)v, NULL, 0);
3220
48.1k
    Py_END_CRITICAL_SECTION();
3221
48.1k
    if (v == NULL)
3222
0
        return -1;
3223
48.1k
    Py_DECREF(v);
3224
48.1k
    return 0;
3225
48.1k
}
3226
3227
/*[clinic input]
3228
@critical_section
3229
list.reverse
3230
3231
Reverse *IN PLACE*.
3232
[clinic start generated code]*/
3233
3234
static PyObject *
3235
list_reverse_impl(PyListObject *self)
3236
/*[clinic end generated code: output=482544fc451abea9 input=04ac8e0c6a66e4d9]*/
3237
0
{
3238
0
    if (Py_SIZE(self) > 1)
3239
0
        reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
3240
0
    Py_RETURN_NONE;
3241
0
}
3242
3243
int
3244
PyList_Reverse(PyObject *v)
3245
0
{
3246
0
    PyListObject *self = (PyListObject *)v;
3247
3248
0
    if (v == NULL || !PyList_Check(v)) {
3249
0
        PyErr_BadInternalCall();
3250
0
        return -1;
3251
0
    }
3252
0
    Py_BEGIN_CRITICAL_SECTION(self);
3253
0
    if (Py_SIZE(self) > 1) {
3254
0
        reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
3255
0
    }
3256
0
    Py_END_CRITICAL_SECTION()
3257
0
    return 0;
3258
0
}
3259
3260
PyObject *
3261
PyList_AsTuple(PyObject *v)
3262
30.3k
{
3263
30.3k
    if (v == NULL || !PyList_Check(v)) {
3264
0
        PyErr_BadInternalCall();
3265
0
        return NULL;
3266
0
    }
3267
30.3k
    PyObject *ret;
3268
30.3k
    PyListObject *self = (PyListObject *)v;
3269
30.3k
    Py_BEGIN_CRITICAL_SECTION(self);
3270
30.3k
    ret = PyTuple_FromArray(self->ob_item, Py_SIZE(v));
3271
30.3k
    Py_END_CRITICAL_SECTION();
3272
30.3k
    return ret;
3273
30.3k
}
3274
3275
PyObject *
3276
_PyList_AsTupleAndClear(PyListObject *self)
3277
10
{
3278
10
    assert(self != NULL);
3279
10
    PyObject *ret;
3280
10
    if (self->ob_item == NULL) {
3281
0
        return PyTuple_New(0);
3282
0
    }
3283
10
    Py_BEGIN_CRITICAL_SECTION(self);
3284
10
    PyObject **items = self->ob_item;
3285
10
    Py_ssize_t size = Py_SIZE(self);
3286
10
    Py_SET_SIZE(self, 0);
3287
10
    ret = _PyTuple_FromArraySteal(items, size);
3288
10
    Py_END_CRITICAL_SECTION();
3289
10
    return ret;
3290
10
}
3291
3292
PyObject *
3293
_PyList_FromStackRefStealOnSuccess(const _PyStackRef *src, Py_ssize_t n)
3294
7.07M
{
3295
7.07M
    if (n == 0) {
3296
7.06M
        return PyList_New(0);
3297
7.06M
    }
3298
3299
10.2k
    PyListObject *list = (PyListObject *)PyList_New(n);
3300
10.2k
    if (list == NULL) {
3301
0
        return NULL;
3302
0
    }
3303
3304
10.2k
    PyObject **dst = list->ob_item;
3305
24.4k
    for (Py_ssize_t i = 0; i < n; i++) {
3306
14.2k
        dst[i] = PyStackRef_AsPyObjectSteal(src[i]);
3307
14.2k
    }
3308
3309
10.2k
    return (PyObject *)list;
3310
10.2k
}
3311
3312
/*[clinic input]
3313
list.index
3314
3315
    value: object
3316
    start: slice_index(accept={int}) = 0
3317
    stop: slice_index(accept={int}, c_default="PY_SSIZE_T_MAX") = sys.maxsize
3318
    /
3319
3320
Return first index of value.
3321
3322
Raises ValueError if the value is not present.
3323
[clinic start generated code]*/
3324
3325
static PyObject *
3326
list_index_impl(PyListObject *self, PyObject *value, Py_ssize_t start,
3327
                Py_ssize_t stop)
3328
/*[clinic end generated code: output=ec51b88787e4e481 input=40ec5826303a0eb1]*/
3329
0
{
3330
0
    if (start < 0) {
3331
0
        start += Py_SIZE(self);
3332
0
        if (start < 0)
3333
0
            start = 0;
3334
0
    }
3335
0
    if (stop < 0) {
3336
0
        stop += Py_SIZE(self);
3337
0
        if (stop < 0)
3338
0
            stop = 0;
3339
0
    }
3340
0
    for (Py_ssize_t i = start; i < stop; i++) {
3341
0
        PyObject *obj = list_get_item_ref(self, i);
3342
0
        if (obj == NULL) {
3343
            // out-of-bounds
3344
0
            break;
3345
0
        }
3346
0
        int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
3347
0
        Py_DECREF(obj);
3348
0
        if (cmp > 0)
3349
0
            return PyLong_FromSsize_t(i);
3350
0
        else if (cmp < 0)
3351
0
            return NULL;
3352
0
    }
3353
0
    PyErr_SetString(PyExc_ValueError, "list.index(x): x not in list");
3354
0
    return NULL;
3355
0
}
3356
3357
/*[clinic input]
3358
list.count
3359
3360
     value: object
3361
     /
3362
3363
Return number of occurrences of value.
3364
[clinic start generated code]*/
3365
3366
static PyObject *
3367
list_count_impl(PyListObject *self, PyObject *value)
3368
/*[clinic end generated code: output=eff66f14aef2df86 input=3bdc3a5e6f749565]*/
3369
0
{
3370
0
    Py_ssize_t count = 0;
3371
0
    for (Py_ssize_t i = 0; ; i++) {
3372
0
        PyObject *obj = list_get_item_ref(self, i);
3373
0
        if (obj == NULL) {
3374
            // out-of-bounds
3375
0
            break;
3376
0
        }
3377
0
        if (obj == value) {
3378
0
           count++;
3379
0
           Py_DECREF(obj);
3380
0
           continue;
3381
0
        }
3382
0
        int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
3383
0
        Py_DECREF(obj);
3384
0
        if (cmp > 0)
3385
0
            count++;
3386
0
        else if (cmp < 0)
3387
0
            return NULL;
3388
0
    }
3389
0
    return PyLong_FromSsize_t(count);
3390
0
}
3391
3392
/*[clinic input]
3393
@critical_section
3394
list.remove
3395
3396
     value: object
3397
     /
3398
3399
Remove first occurrence of value.
3400
3401
Raises ValueError if the value is not present.
3402
[clinic start generated code]*/
3403
3404
static PyObject *
3405
list_remove_impl(PyListObject *self, PyObject *value)
3406
/*[clinic end generated code: output=b9b76a6633b18778 input=26c813dbb95aa93b]*/
3407
1.44k
{
3408
1.44k
    Py_ssize_t i;
3409
3410
1.44k
    for (i = 0; i < Py_SIZE(self); i++) {
3411
1.44k
        PyObject *obj = self->ob_item[i];
3412
1.44k
        Py_INCREF(obj);
3413
1.44k
        int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
3414
1.44k
        Py_DECREF(obj);
3415
1.44k
        if (cmp > 0) {
3416
1.44k
            if (list_ass_slice_lock_held(self, i, i+1, NULL) == 0)
3417
1.44k
                Py_RETURN_NONE;
3418
0
            return NULL;
3419
1.44k
        }
3420
0
        else if (cmp < 0)
3421
0
            return NULL;
3422
1.44k
    }
3423
0
    PyErr_SetString(PyExc_ValueError, "list.remove(x): x not in list");
3424
0
    return NULL;
3425
1.44k
}
3426
3427
static int
3428
list_traverse(PyObject *self, visitproc visit, void *arg)
3429
33.7M
{
3430
33.7M
    PyListObject *o = (PyListObject *)self;
3431
33.7M
    Py_ssize_t i;
3432
3433
63.1M
    for (i = Py_SIZE(o); --i >= 0; )
3434
29.3M
        Py_VISIT(o->ob_item[i]);
3435
33.7M
    return 0;
3436
33.7M
}
3437
3438
static PyObject *
3439
list_richcompare_impl(PyObject *v, PyObject *w, int op)
3440
166k
{
3441
166k
    PyListObject *vl, *wl;
3442
166k
    Py_ssize_t i;
3443
3444
166k
    if (!PyList_Check(v) || !PyList_Check(w))
3445
274
        Py_RETURN_NOTIMPLEMENTED;
3446
3447
166k
    vl = (PyListObject *)v;
3448
166k
    wl = (PyListObject *)w;
3449
3450
166k
    if (Py_SIZE(vl) != Py_SIZE(wl) && (op == Py_EQ || op == Py_NE)) {
3451
        /* Shortcut: if the lengths differ, the lists differ */
3452
50.7k
        if (op == Py_EQ)
3453
50.7k
            Py_RETURN_FALSE;
3454
0
        else
3455
0
            Py_RETURN_TRUE;
3456
50.7k
    }
3457
3458
    /* Search for the first index where items are different */
3459
116k
    for (i = 0; i < Py_SIZE(vl) && i < Py_SIZE(wl); i++) {
3460
114k
        PyObject *vitem = vl->ob_item[i];
3461
114k
        PyObject *witem = wl->ob_item[i];
3462
114k
        if (vitem == witem) {
3463
587
            continue;
3464
587
        }
3465
3466
114k
        Py_INCREF(vitem);
3467
114k
        Py_INCREF(witem);
3468
114k
        int k = PyObject_RichCompareBool(vitem, witem, Py_EQ);
3469
114k
        Py_DECREF(vitem);
3470
114k
        Py_DECREF(witem);
3471
114k
        if (k < 0)
3472
0
            return NULL;
3473
114k
        if (!k)
3474
113k
            break;
3475
114k
    }
3476
3477
115k
    if (i >= Py_SIZE(vl) || i >= Py_SIZE(wl)) {
3478
        /* No more items to compare -- compare sizes */
3479
1.94k
        Py_RETURN_RICHCOMPARE(Py_SIZE(vl), Py_SIZE(wl), op);
3480
1.94k
    }
3481
3482
    /* We have an item that differs -- shortcuts for EQ/NE */
3483
113k
    if (op == Py_EQ) {
3484
113k
        Py_RETURN_FALSE;
3485
113k
    }
3486
7
    if (op == Py_NE) {
3487
7
        Py_RETURN_TRUE;
3488
7
    }
3489
3490
    /* Compare the final item again using the proper operator */
3491
0
    PyObject *vitem = vl->ob_item[i];
3492
0
    PyObject *witem = wl->ob_item[i];
3493
0
    Py_INCREF(vitem);
3494
0
    Py_INCREF(witem);
3495
0
    PyObject *result = PyObject_RichCompare(vl->ob_item[i], wl->ob_item[i], op);
3496
0
    Py_DECREF(vitem);
3497
0
    Py_DECREF(witem);
3498
0
    return result;
3499
7
}
3500
3501
static PyObject *
3502
list_richcompare(PyObject *v, PyObject *w, int op)
3503
166k
{
3504
166k
    PyObject *ret;
3505
166k
    Py_BEGIN_CRITICAL_SECTION2(v, w);
3506
166k
    ret = list_richcompare_impl(v, w, op);
3507
166k
    Py_END_CRITICAL_SECTION2()
3508
166k
    return ret;
3509
166k
}
3510
3511
/*[clinic input]
3512
list.__init__
3513
3514
    iterable: object(c_default="NULL") = ()
3515
    /
3516
3517
Built-in mutable sequence.
3518
3519
If no argument is given, the constructor creates a new empty list.
3520
The argument must be an iterable if specified.
3521
[clinic start generated code]*/
3522
3523
static int
3524
list___init___impl(PyListObject *self, PyObject *iterable)
3525
/*[clinic end generated code: output=0f3c21379d01de48 input=b3f3fe7206af8f6b]*/
3526
287k
{
3527
    /* Verify list invariants established by PyType_GenericAlloc() */
3528
287k
    assert(0 <= Py_SIZE(self));
3529
287k
    assert(Py_SIZE(self) <= self->allocated || self->allocated == -1);
3530
287k
    assert(self->ob_item != NULL ||
3531
287k
           self->allocated == 0 || self->allocated == -1);
3532
3533
    /* Empty previous contents */
3534
287k
    if (self->ob_item != NULL) {
3535
0
        Py_BEGIN_CRITICAL_SECTION(self);
3536
0
        list_clear(self);
3537
0
        Py_END_CRITICAL_SECTION();
3538
0
    }
3539
287k
    if (iterable != NULL) {
3540
286k
        if (_list_extend(self, iterable) < 0) {
3541
0
            return -1;
3542
0
        }
3543
286k
    }
3544
287k
    return 0;
3545
287k
}
3546
3547
static PyObject *
3548
list_vectorcall(PyObject *type, PyObject * const*args,
3549
                size_t nargsf, PyObject *kwnames)
3550
286k
{
3551
286k
    if (!_PyArg_NoKwnames("list", kwnames)) {
3552
0
        return NULL;
3553
0
    }
3554
286k
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
3555
286k
    if (!_PyArg_CheckPositional("list", nargs, 0, 1)) {
3556
0
        return NULL;
3557
0
    }
3558
3559
286k
    PyObject *list = PyType_GenericAlloc(_PyType_CAST(type), 0);
3560
286k
    if (list == NULL) {
3561
0
        return NULL;
3562
0
    }
3563
286k
    if (nargs) {
3564
286k
        if (list___init___impl((PyListObject *)list, args[0])) {
3565
0
            Py_DECREF(list);
3566
0
            return NULL;
3567
0
        }
3568
286k
    }
3569
286k
    return list;
3570
286k
}
3571
3572
3573
/*[clinic input]
3574
list.__sizeof__
3575
3576
Return the size of the list in memory, in bytes.
3577
[clinic start generated code]*/
3578
3579
static PyObject *
3580
list___sizeof___impl(PyListObject *self)
3581
/*[clinic end generated code: output=3417541f95f9a53e input=b8030a5d5ce8a187]*/
3582
0
{
3583
0
    size_t res = _PyObject_SIZE(Py_TYPE(self));
3584
#ifdef Py_GIL_DISABLED
3585
    PyObject **ob_item = _Py_atomic_load_ptr(&self->ob_item);
3586
    if (ob_item != NULL) {
3587
        res += list_capacity(ob_item) * sizeof(PyObject *);
3588
    }
3589
#else
3590
0
    res += (size_t)self->allocated * sizeof(PyObject *);
3591
0
#endif
3592
0
    return PyLong_FromSize_t(res);
3593
0
}
3594
3595
static PyObject *list_iter(PyObject *seq);
3596
static PyObject *list_subscript(PyObject*, PyObject*);
3597
3598
static PyMethodDef list_methods[] = {
3599
    {"__getitem__", list_subscript, METH_O|METH_COEXIST,
3600
     PyDoc_STR("__getitem__($self, index, /)\n--\n\nReturn self[index].")},
3601
    LIST___REVERSED___METHODDEF
3602
    LIST___SIZEOF___METHODDEF
3603
    PY_LIST_CLEAR_METHODDEF
3604
    LIST_COPY_METHODDEF
3605
    LIST_APPEND_METHODDEF
3606
    LIST_INSERT_METHODDEF
3607
    LIST_EXTEND_METHODDEF
3608
    LIST_POP_METHODDEF
3609
    LIST_REMOVE_METHODDEF
3610
    LIST_INDEX_METHODDEF
3611
    LIST_COUNT_METHODDEF
3612
    LIST_REVERSE_METHODDEF
3613
    LIST_SORT_METHODDEF
3614
    {"__class_getitem__", Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")},
3615
    {NULL,              NULL}           /* sentinel */
3616
};
3617
3618
static PySequenceMethods list_as_sequence = {
3619
    list_length,                                /* sq_length */
3620
    _PyList_Concat,                             /* sq_concat */
3621
    list_repeat,                                /* sq_repeat */
3622
    list_item,                                  /* sq_item */
3623
    0,                                          /* sq_slice */
3624
    list_ass_item,                              /* sq_ass_item */
3625
    0,                                          /* sq_ass_slice */
3626
    list_contains,                              /* sq_contains */
3627
    list_inplace_concat,                        /* sq_inplace_concat */
3628
    list_inplace_repeat,                        /* sq_inplace_repeat */
3629
};
3630
3631
static inline PyObject *
3632
list_slice_step_lock_held(PyListObject *a, Py_ssize_t start, Py_ssize_t step, Py_ssize_t len)
3633
28
{
3634
28
    PyListObject *np = (PyListObject *)list_new_prealloc(len);
3635
28
    if (np == NULL) {
3636
0
        return NULL;
3637
0
    }
3638
28
    size_t cur;
3639
28
    Py_ssize_t i;
3640
28
    PyObject **src = a->ob_item;
3641
28
    PyObject **dest = np->ob_item;
3642
280
    for (cur = start, i = 0; i < len;
3643
252
            cur += (size_t)step, i++) {
3644
252
        PyObject *v = src[cur];
3645
252
        dest[i] = Py_NewRef(v);
3646
252
    }
3647
28
    Py_SET_SIZE(np, len);
3648
28
    return (PyObject *)np;
3649
28
}
3650
3651
static PyObject *
3652
list_slice_wrap(PyListObject *aa, Py_ssize_t start, Py_ssize_t stop, Py_ssize_t step)
3653
1.69M
{
3654
1.69M
    PyObject *res = NULL;
3655
1.69M
    Py_BEGIN_CRITICAL_SECTION(aa);
3656
1.69M
    Py_ssize_t len = PySlice_AdjustIndices(Py_SIZE(aa), &start, &stop, step);
3657
1.69M
    if (len <= 0) {
3658
7
        res = PyList_New(0);
3659
7
    }
3660
1.69M
    else if (step == 1) {
3661
1.69M
        res = list_slice_lock_held(aa, start, stop);
3662
1.69M
    }
3663
28
    else {
3664
28
        res = list_slice_step_lock_held(aa, start, step, len);
3665
28
    }
3666
1.69M
    Py_END_CRITICAL_SECTION();
3667
1.69M
    return res;
3668
1.69M
}
3669
3670
static inline PyObject*
3671
list_slice_subscript(PyObject* self, PyObject* item)
3672
1.69M
{
3673
1.69M
    assert(PyList_Check(self));
3674
1.69M
    assert(PySlice_Check(item));
3675
1.69M
    Py_ssize_t start, stop, step;
3676
1.69M
    if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
3677
0
        return NULL;
3678
0
    }
3679
1.69M
    return list_slice_wrap((PyListObject *)self, start, stop, step);
3680
1.69M
}
3681
3682
PyObject *
3683
_PyList_SliceSubscript(PyObject* _self, PyObject* item)
3684
1.69M
{
3685
1.69M
    return list_slice_subscript(_self, item);
3686
1.69M
}
3687
3688
static PyObject *
3689
list_subscript(PyObject* _self, PyObject* item)
3690
2.12M
{
3691
2.12M
    PyListObject* self = (PyListObject*)_self;
3692
2.12M
    if (_PyIndex_Check(item)) {
3693
2.12M
        Py_ssize_t i;
3694
2.12M
        i = PyNumber_AsSsize_t(item, PyExc_IndexError);
3695
2.12M
        if (i == -1 && PyErr_Occurred())
3696
0
            return NULL;
3697
2.12M
        if (i < 0)
3698
615
            i += PyList_GET_SIZE(self);
3699
2.12M
        return list_item((PyObject *)self, i);
3700
2.12M
    }
3701
64
    else if (PySlice_Check(item)) {
3702
64
        return list_slice_subscript(_self, item);
3703
64
    }
3704
0
    else {
3705
0
        PyErr_Format(PyExc_TypeError,
3706
0
                     "list indices must be integers or slices, not %.200s",
3707
0
                     Py_TYPE(item)->tp_name);
3708
0
        return NULL;
3709
0
    }
3710
2.12M
}
3711
3712
static Py_ssize_t
3713
adjust_slice_indexes(PyListObject *lst,
3714
                     Py_ssize_t *start, Py_ssize_t *stop,
3715
                     Py_ssize_t step)
3716
19.0k
{
3717
19.0k
    Py_ssize_t slicelength = PySlice_AdjustIndices(Py_SIZE(lst), start, stop,
3718
19.0k
                                                   step);
3719
3720
    /* Make sure s[5:2] = [..] inserts at the right place:
3721
        before 5, not before 2. */
3722
19.0k
    if ((step < 0 && *start < *stop) ||
3723
19.0k
        (step > 0 && *start > *stop))
3724
0
        *stop = *start;
3725
3726
19.0k
    return slicelength;
3727
19.0k
}
3728
3729
static int
3730
list_ass_subscript_lock_held(PyObject *_self, PyObject *item, PyObject *value)
3731
1.72M
{
3732
1.72M
    _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(_self);
3733
3734
1.72M
    PyListObject *self = (PyListObject *)_self;
3735
1.72M
    if (_PyIndex_Check(item)) {
3736
1.70M
        Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
3737
1.70M
        if (i == -1 && PyErr_Occurred())
3738
0
            return -1;
3739
1.70M
        if (i < 0)
3740
1.69M
            i += PyList_GET_SIZE(self);
3741
1.70M
        return list_ass_item_lock_held(self, i, value);
3742
1.70M
    }
3743
19.0k
    else if (PySlice_Check(item)) {
3744
19.0k
        Py_ssize_t start, stop, step;
3745
3746
19.0k
        if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
3747
0
            return -1;
3748
0
        }
3749
3750
19.0k
        if (value == NULL) {
3751
            /* delete slice */
3752
7
            PyObject **garbage;
3753
7
            size_t cur;
3754
7
            Py_ssize_t i;
3755
7
            int res;
3756
3757
7
            Py_ssize_t slicelength = adjust_slice_indexes(self, &start, &stop,
3758
7
                                                          step);
3759
3760
7
            if (step == 1)
3761
7
                return list_ass_slice_lock_held(self, start, stop, value);
3762
3763
0
            if (slicelength <= 0)
3764
0
                return 0;
3765
3766
0
            if (step < 0) {
3767
0
                stop = start + 1;
3768
0
                start = stop + step*(slicelength - 1) - 1;
3769
0
                step = -step;
3770
0
            }
3771
3772
0
            garbage = (PyObject**)
3773
0
                PyMem_Malloc(slicelength*sizeof(PyObject*));
3774
0
            if (!garbage) {
3775
0
                PyErr_NoMemory();
3776
0
                return -1;
3777
0
            }
3778
3779
            /* drawing pictures might help understand these for
3780
               loops. Basically, we memmove the parts of the
3781
               list that are *not* part of the slice: step-1
3782
               items for each item that is part of the slice,
3783
               and then tail end of the list that was not
3784
               covered by the slice */
3785
0
            for (cur = start, i = 0;
3786
0
                 cur < (size_t)stop;
3787
0
                 cur += step, i++) {
3788
0
                Py_ssize_t lim = step - 1;
3789
3790
0
                garbage[i] = PyList_GET_ITEM(self, cur);
3791
3792
0
                if (cur + step >= (size_t)Py_SIZE(self)) {
3793
0
                    lim = Py_SIZE(self) - cur - 1;
3794
0
                }
3795
3796
0
                memmove(self->ob_item + cur - i,
3797
0
                    self->ob_item + cur + 1,
3798
0
                    lim * sizeof(PyObject *));
3799
0
            }
3800
0
            cur = start + (size_t)slicelength * step;
3801
0
            if (cur < (size_t)Py_SIZE(self)) {
3802
0
                memmove(self->ob_item + cur - slicelength,
3803
0
                    self->ob_item + cur,
3804
0
                    (Py_SIZE(self) - cur) *
3805
0
                     sizeof(PyObject *));
3806
0
            }
3807
3808
0
            Py_SET_SIZE(self, Py_SIZE(self) - slicelength);
3809
0
            res = list_resize(self, Py_SIZE(self));
3810
3811
0
            for (i = 0; i < slicelength; i++) {
3812
0
                Py_DECREF(garbage[i]);
3813
0
            }
3814
0
            PyMem_Free(garbage);
3815
3816
0
            return res;
3817
0
        }
3818
19.0k
        else {
3819
            /* assign slice */
3820
19.0k
            PyObject *ins, *seq;
3821
19.0k
            PyObject **garbage, **seqitems, **selfitems;
3822
19.0k
            Py_ssize_t i;
3823
19.0k
            size_t cur;
3824
3825
            /* protect against a[::-1] = a */
3826
19.0k
            if (self == (PyListObject*)value) {
3827
0
                seq = list_slice_lock_held((PyListObject *)value, 0,
3828
0
                                            Py_SIZE(value));
3829
0
            }
3830
19.0k
            else {
3831
19.0k
                seq = PySequence_Fast(value,
3832
19.0k
                                      "must assign iterable "
3833
19.0k
                                      "to extended slice");
3834
19.0k
            }
3835
19.0k
            if (!seq)
3836
0
                return -1;
3837
3838
19.0k
            Py_ssize_t slicelength = adjust_slice_indexes(self, &start, &stop,
3839
19.0k
                                                          step);
3840
3841
19.0k
            if (step == 1) {
3842
19.0k
                int res = list_ass_slice_lock_held(self, start, stop, seq);
3843
19.0k
                Py_DECREF(seq);
3844
19.0k
                return res;
3845
19.0k
            }
3846
3847
0
            if (PySequence_Fast_GET_SIZE(seq) != slicelength) {
3848
0
                PyErr_Format(PyExc_ValueError,
3849
0
                    "attempt to assign sequence of "
3850
0
                    "size %zd to extended slice of "
3851
0
                    "size %zd",
3852
0
                         PySequence_Fast_GET_SIZE(seq),
3853
0
                         slicelength);
3854
0
                Py_DECREF(seq);
3855
0
                return -1;
3856
0
            }
3857
3858
0
            if (!slicelength) {
3859
0
                Py_DECREF(seq);
3860
0
                return 0;
3861
0
            }
3862
3863
0
            garbage = (PyObject**)
3864
0
                PyMem_Malloc(slicelength*sizeof(PyObject*));
3865
0
            if (!garbage) {
3866
0
                Py_DECREF(seq);
3867
0
                PyErr_NoMemory();
3868
0
                return -1;
3869
0
            }
3870
3871
0
            selfitems = self->ob_item;
3872
0
            seqitems = PySequence_Fast_ITEMS(seq);
3873
0
            for (cur = start, i = 0; i < slicelength;
3874
0
                 cur += (size_t)step, i++) {
3875
0
                garbage[i] = selfitems[cur];
3876
0
                ins = Py_NewRef(seqitems[i]);
3877
0
                FT_ATOMIC_STORE_PTR_RELEASE(selfitems[cur], ins);
3878
0
            }
3879
3880
0
            for (i = 0; i < slicelength; i++) {
3881
0
                Py_DECREF(garbage[i]);
3882
0
            }
3883
3884
0
            PyMem_Free(garbage);
3885
0
            Py_DECREF(seq);
3886
3887
0
            return 0;
3888
0
        }
3889
19.0k
    }
3890
0
    else {
3891
0
        PyErr_Format(PyExc_TypeError,
3892
0
                     "list indices must be integers or slices, not %.200s",
3893
0
                     Py_TYPE(item)->tp_name);
3894
0
        return -1;
3895
0
    }
3896
1.72M
}
3897
3898
static int
3899
list_ass_subscript(PyObject *self, PyObject *item, PyObject *value)
3900
1.72M
{
3901
1.72M
    int res;
3902
#ifdef Py_GIL_DISABLED
3903
    if (PySlice_Check(item) && value != NULL && PyList_CheckExact(value)) {
3904
        Py_BEGIN_CRITICAL_SECTION2(self, value);
3905
        res = list_ass_subscript_lock_held(self, item, value);
3906
        Py_END_CRITICAL_SECTION2();
3907
        return res;
3908
    }
3909
#endif
3910
1.72M
    Py_BEGIN_CRITICAL_SECTION(self);
3911
1.72M
    res = list_ass_subscript_lock_held(self, item, value);
3912
1.72M
    Py_END_CRITICAL_SECTION();
3913
1.72M
    return res;
3914
1.72M
}
3915
3916
static _PyObjectIndexPair
3917
list_iteritem(PyObject *obj, Py_ssize_t index)
3918
777k
{
3919
777k
    PyObject *result = list_get_item_ref((PyListObject *)obj, index);
3920
777k
    return (_PyObjectIndexPair) { .object = result, .index = index + 1 };
3921
777k
}
3922
3923
static PyMappingMethods list_as_mapping = {
3924
    list_length,
3925
    list_subscript,
3926
    list_ass_subscript
3927
};
3928
3929
PyTypeObject PyList_Type = {
3930
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
3931
    "list",
3932
    sizeof(PyListObject),
3933
    0,
3934
    list_dealloc,                               /* tp_dealloc */
3935
    0,                                          /* tp_vectorcall_offset */
3936
    0,                                          /* tp_getattr */
3937
    0,                                          /* tp_setattr */
3938
    0,                                          /* tp_as_async */
3939
    list_repr,                                  /* tp_repr */
3940
    0,                                          /* tp_as_number */
3941
    &list_as_sequence,                          /* tp_as_sequence */
3942
    &list_as_mapping,                           /* tp_as_mapping */
3943
    PyObject_HashNotImplemented,                /* tp_hash */
3944
    0,                                          /* tp_call */
3945
    0,                                          /* tp_str */
3946
    PyObject_GenericGetAttr,                    /* tp_getattro */
3947
    0,                                          /* tp_setattro */
3948
    0,                                          /* tp_as_buffer */
3949
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
3950
        Py_TPFLAGS_BASETYPE | Py_TPFLAGS_LIST_SUBCLASS |
3951
        _Py_TPFLAGS_MATCH_SELF | Py_TPFLAGS_SEQUENCE,  /* tp_flags */
3952
    list___init____doc__,                       /* tp_doc */
3953
    list_traverse,                              /* tp_traverse */
3954
    list_clear_slot,                            /* tp_clear */
3955
    list_richcompare,                           /* tp_richcompare */
3956
    0,                                          /* tp_weaklistoffset */
3957
    list_iter,                                  /* tp_iter */
3958
    0,                                          /* tp_iternext */
3959
    list_methods,                               /* tp_methods */
3960
    0,                                          /* tp_members */
3961
    0,                                          /* tp_getset */
3962
    0,                                          /* tp_base */
3963
    0,                                          /* tp_dict */
3964
    0,                                          /* tp_descr_get */
3965
    0,                                          /* tp_descr_set */
3966
    0,                                          /* tp_dictoffset */
3967
    list___init__,                              /* tp_init */
3968
    PyType_GenericAlloc,                        /* tp_alloc */
3969
    PyType_GenericNew,                          /* tp_new */
3970
    PyObject_GC_Del,                            /* tp_free */
3971
    .tp_vectorcall = list_vectorcall,
3972
    .tp_version_tag = _Py_TYPE_VERSION_LIST,
3973
    ._tp_iteritem = list_iteritem,
3974
};
3975
3976
/*********************** List Iterator **************************/
3977
3978
static void listiter_dealloc(PyObject *);
3979
static int listiter_traverse(PyObject *, visitproc, void *);
3980
static PyObject *listiter_next(PyObject *);
3981
static PyObject *listiter_len(PyObject *, PyObject *);
3982
static PyObject *listiter_reduce_general(void *_it, int forward);
3983
static PyObject *listiter_reduce(PyObject *, PyObject *);
3984
static PyObject *listiter_setstate(PyObject *, PyObject *state);
3985
3986
PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it)).");
3987
PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");
3988
PyDoc_STRVAR(setstate_doc, "Set state information for unpickling.");
3989
3990
static PyMethodDef listiter_methods[] = {
3991
    {"__length_hint__", listiter_len, METH_NOARGS, length_hint_doc},
3992
    {"__reduce__", listiter_reduce, METH_NOARGS, reduce_doc},
3993
    {"__setstate__", listiter_setstate, METH_O, setstate_doc},
3994
    {NULL,              NULL}           /* sentinel */
3995
};
3996
3997
PyTypeObject PyListIter_Type = {
3998
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
3999
    "list_iterator",                            /* tp_name */
4000
    sizeof(_PyListIterObject),                  /* tp_basicsize */
4001
    0,                                          /* tp_itemsize */
4002
    /* methods */
4003
    listiter_dealloc,               /* tp_dealloc */
4004
    0,                                          /* tp_vectorcall_offset */
4005
    0,                                          /* tp_getattr */
4006
    0,                                          /* tp_setattr */
4007
    0,                                          /* tp_as_async */
4008
    0,                                          /* tp_repr */
4009
    0,                                          /* tp_as_number */
4010
    0,                                          /* tp_as_sequence */
4011
    0,                                          /* tp_as_mapping */
4012
    0,                                          /* tp_hash */
4013
    0,                                          /* tp_call */
4014
    0,                                          /* tp_str */
4015
    PyObject_GenericGetAttr,                    /* tp_getattro */
4016
    0,                                          /* tp_setattro */
4017
    0,                                          /* tp_as_buffer */
4018
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
4019
    0,                                          /* tp_doc */
4020
    listiter_traverse,                          /* tp_traverse */
4021
    0,                                          /* tp_clear */
4022
    0,                                          /* tp_richcompare */
4023
    0,                                          /* tp_weaklistoffset */
4024
    PyObject_SelfIter,                          /* tp_iter */
4025
    listiter_next,                              /* tp_iternext */
4026
    listiter_methods,                           /* tp_methods */
4027
    0,                                          /* tp_members */
4028
};
4029
4030
4031
static PyObject *
4032
list_iter(PyObject *seq)
4033
289k
{
4034
289k
    if (!PyList_Check(seq)) {
4035
0
        PyErr_BadInternalCall();
4036
0
        return NULL;
4037
0
    }
4038
289k
    _PyListIterObject *it = _Py_FREELIST_POP(_PyListIterObject, list_iters);
4039
289k
    if (it == NULL) {
4040
29
        it = PyObject_GC_New(_PyListIterObject, &PyListIter_Type);
4041
29
        if (it == NULL) {
4042
0
            return NULL;
4043
0
        }
4044
29
    }
4045
289k
    it->it_index = 0;
4046
289k
    it->it_seq = (PyListObject *)Py_NewRef(seq);
4047
289k
    _PyObject_GC_TRACK(it);
4048
289k
    return (PyObject *)it;
4049
289k
}
4050
4051
static void
4052
listiter_dealloc(PyObject *self)
4053
289k
{
4054
289k
    _PyListIterObject *it = (_PyListIterObject *)self;
4055
289k
    _PyObject_GC_UNTRACK(it);
4056
289k
    Py_XDECREF(it->it_seq);
4057
289k
    assert(Py_IS_TYPE(self, &PyListIter_Type));
4058
289k
    _Py_FREELIST_FREE(list_iters, it, PyObject_GC_Del);
4059
289k
}
4060
4061
static int
4062
listiter_traverse(PyObject *it, visitproc visit, void *arg)
4063
0
{
4064
0
    Py_VISIT(((_PyListIterObject *)it)->it_seq);
4065
0
    return 0;
4066
0
}
4067
4068
static PyObject *
4069
listiter_next(PyObject *self)
4070
32.7M
{
4071
32.7M
    _PyListIterObject *it = (_PyListIterObject *)self;
4072
32.7M
    Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4073
32.7M
    if (index < 0) {
4074
168
        return NULL;
4075
168
    }
4076
4077
32.7M
    PyObject *item = list_get_item_ref(it->it_seq, index);
4078
32.7M
    if (item == NULL) {
4079
        // out-of-bounds
4080
289k
        FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, -1);
4081
289k
#ifndef Py_GIL_DISABLED
4082
289k
        PyListObject *seq = it->it_seq;
4083
289k
        it->it_seq = NULL;
4084
289k
        Py_DECREF(seq);
4085
289k
#endif
4086
289k
        return NULL;
4087
289k
    }
4088
32.4M
    FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index + 1);
4089
32.4M
    return item;
4090
32.7M
}
4091
4092
static PyObject *
4093
listiter_len(PyObject *self, PyObject *Py_UNUSED(ignored))
4094
0
{
4095
0
    assert(self != NULL);
4096
0
    _PyListIterObject *it = (_PyListIterObject *)self;
4097
0
    Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4098
0
    if (index >= 0) {
4099
0
        Py_ssize_t len = PyList_GET_SIZE(it->it_seq) - index;
4100
0
        if (len >= 0)
4101
0
            return PyLong_FromSsize_t(len);
4102
0
    }
4103
0
    return PyLong_FromLong(0);
4104
0
}
4105
4106
static PyObject *
4107
listiter_reduce(PyObject *it, PyObject *Py_UNUSED(ignored))
4108
0
{
4109
0
    return listiter_reduce_general(it, 1);
4110
0
}
4111
4112
static PyObject *
4113
listiter_setstate(PyObject *self, PyObject *state)
4114
0
{
4115
0
    _PyListIterObject *it = (_PyListIterObject *)self;
4116
0
    Py_ssize_t index = PyLong_AsSsize_t(state);
4117
0
    if (index == -1 && PyErr_Occurred())
4118
0
        return NULL;
4119
0
    if (it->it_seq != NULL) {
4120
0
        if (index < -1)
4121
0
            index = -1;
4122
0
        else if (index > PyList_GET_SIZE(it->it_seq))
4123
0
            index = PyList_GET_SIZE(it->it_seq); /* iterator exhausted */
4124
0
        FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index);
4125
0
    }
4126
0
    Py_RETURN_NONE;
4127
0
}
4128
4129
/*********************** List Reverse Iterator **************************/
4130
4131
typedef struct {
4132
    PyObject_HEAD
4133
    Py_ssize_t it_index;
4134
    PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
4135
} listreviterobject;
4136
4137
static void listreviter_dealloc(PyObject *);
4138
static int listreviter_traverse(PyObject *, visitproc, void *);
4139
static PyObject *listreviter_next(PyObject *);
4140
static PyObject *listreviter_len(PyObject *, PyObject *);
4141
static PyObject *listreviter_reduce(PyObject *, PyObject *);
4142
static PyObject *listreviter_setstate(PyObject *, PyObject *);
4143
4144
static PyMethodDef listreviter_methods[] = {
4145
    {"__length_hint__", listreviter_len, METH_NOARGS, length_hint_doc},
4146
    {"__reduce__", listreviter_reduce, METH_NOARGS, reduce_doc},
4147
    {"__setstate__", listreviter_setstate, METH_O, setstate_doc},
4148
    {NULL,              NULL}           /* sentinel */
4149
};
4150
4151
PyTypeObject PyListRevIter_Type = {
4152
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
4153
    "list_reverseiterator",                     /* tp_name */
4154
    sizeof(listreviterobject),                  /* tp_basicsize */
4155
    0,                                          /* tp_itemsize */
4156
    /* methods */
4157
    listreviter_dealloc,                        /* tp_dealloc */
4158
    0,                                          /* tp_vectorcall_offset */
4159
    0,                                          /* tp_getattr */
4160
    0,                                          /* tp_setattr */
4161
    0,                                          /* tp_as_async */
4162
    0,                                          /* tp_repr */
4163
    0,                                          /* tp_as_number */
4164
    0,                                          /* tp_as_sequence */
4165
    0,                                          /* tp_as_mapping */
4166
    0,                                          /* tp_hash */
4167
    0,                                          /* tp_call */
4168
    0,                                          /* tp_str */
4169
    PyObject_GenericGetAttr,                    /* tp_getattro */
4170
    0,                                          /* tp_setattro */
4171
    0,                                          /* tp_as_buffer */
4172
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
4173
    0,                                          /* tp_doc */
4174
    listreviter_traverse,                       /* tp_traverse */
4175
    0,                                          /* tp_clear */
4176
    0,                                          /* tp_richcompare */
4177
    0,                                          /* tp_weaklistoffset */
4178
    PyObject_SelfIter,                          /* tp_iter */
4179
    listreviter_next,                           /* tp_iternext */
4180
    listreviter_methods,                /* tp_methods */
4181
    0,
4182
};
4183
4184
/*[clinic input]
4185
list.__reversed__
4186
4187
Return a reverse iterator over the list.
4188
[clinic start generated code]*/
4189
4190
static PyObject *
4191
list___reversed___impl(PyListObject *self)
4192
/*[clinic end generated code: output=b166f073208c888c input=eadb6e17f8a6a280]*/
4193
22
{
4194
22
    listreviterobject *it;
4195
4196
22
    it = PyObject_GC_New(listreviterobject, &PyListRevIter_Type);
4197
22
    if (it == NULL)
4198
0
        return NULL;
4199
22
    assert(PyList_Check(self));
4200
22
    it->it_index = PyList_GET_SIZE(self) - 1;
4201
22
    it->it_seq = (PyListObject*)Py_NewRef(self);
4202
22
    PyObject_GC_Track(it);
4203
22
    return (PyObject *)it;
4204
22
}
4205
4206
static void
4207
listreviter_dealloc(PyObject *self)
4208
22
{
4209
22
    listreviterobject *it = (listreviterobject *)self;
4210
22
    PyObject_GC_UnTrack(it);
4211
22
    Py_XDECREF(it->it_seq);
4212
22
    PyObject_GC_Del(it);
4213
22
}
4214
4215
static int
4216
listreviter_traverse(PyObject *it, visitproc visit, void *arg)
4217
0
{
4218
0
    Py_VISIT(((listreviterobject *)it)->it_seq);
4219
0
    return 0;
4220
0
}
4221
4222
static PyObject *
4223
listreviter_next(PyObject *self)
4224
0
{
4225
0
    listreviterobject *it = (listreviterobject *)self;
4226
0
    assert(it != NULL);
4227
0
    Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4228
0
    if (index < 0) {
4229
0
        return NULL;
4230
0
    }
4231
4232
0
    PyListObject *seq = it->it_seq;
4233
0
    assert(PyList_Check(seq));
4234
0
    PyObject *item = list_get_item_ref(seq, index);
4235
0
    if (item != NULL) {
4236
0
        FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index - 1);
4237
0
        return item;
4238
0
    }
4239
0
    FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, -1);
4240
0
#ifndef Py_GIL_DISABLED
4241
0
    it->it_seq = NULL;
4242
0
    Py_DECREF(seq);
4243
0
#endif
4244
0
    return NULL;
4245
0
}
4246
4247
static PyObject *
4248
listreviter_len(PyObject *self, PyObject *Py_UNUSED(ignored))
4249
0
{
4250
0
    listreviterobject *it = (listreviterobject *)self;
4251
0
    Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4252
0
    Py_ssize_t len = index + 1;
4253
0
    if (it->it_seq == NULL || PyList_GET_SIZE(it->it_seq) < len)
4254
0
        len = 0;
4255
0
    return PyLong_FromSsize_t(len);
4256
0
}
4257
4258
static PyObject *
4259
listreviter_reduce(PyObject *it, PyObject *Py_UNUSED(ignored))
4260
0
{
4261
0
    return listiter_reduce_general(it, 0);
4262
0
}
4263
4264
static PyObject *
4265
listreviter_setstate(PyObject *self, PyObject *state)
4266
0
{
4267
0
    listreviterobject *it = (listreviterobject *)self;
4268
0
    Py_ssize_t index = PyLong_AsSsize_t(state);
4269
0
    if (index == -1 && PyErr_Occurred())
4270
0
        return NULL;
4271
0
    if (it->it_seq != NULL) {
4272
0
        if (index < -1)
4273
0
            index = -1;
4274
0
        else if (index > PyList_GET_SIZE(it->it_seq) - 1)
4275
0
            index = PyList_GET_SIZE(it->it_seq) - 1;
4276
0
        FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index);
4277
0
    }
4278
0
    Py_RETURN_NONE;
4279
0
}
4280
4281
/* common pickling support */
4282
4283
static PyObject *
4284
listiter_reduce_general(void *_it, int forward)
4285
0
{
4286
0
    PyObject *list;
4287
0
    PyObject *iter;
4288
4289
    /* _PyEval_GetBuiltin can invoke arbitrary code,
4290
     * call must be before access of iterator pointers.
4291
     * see issue #101765 */
4292
4293
0
    if (forward) {
4294
0
        iter = _PyEval_GetBuiltin(&_Py_ID(iter));
4295
0
        _PyListIterObject *it = (_PyListIterObject *)_it;
4296
0
        Py_ssize_t idx = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4297
0
        if (idx >= 0) {
4298
0
            return Py_BuildValue("N(O)n", iter, it->it_seq, idx);
4299
0
        }
4300
0
    } else {
4301
0
        iter = _PyEval_GetBuiltin(&_Py_ID(reversed));
4302
0
        listreviterobject *it = (listreviterobject *)_it;
4303
0
        Py_ssize_t idx = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
4304
0
        if (idx >= 0) {
4305
0
            return Py_BuildValue("N(O)n", iter, it->it_seq, idx);
4306
0
        }
4307
0
    }
4308
    /* empty iterator, create an empty list */
4309
0
    list = PyList_New(0);
4310
0
    if (list == NULL) {
4311
0
        Py_DECREF(iter);
4312
0
        return NULL;
4313
0
    }
4314
0
    return Py_BuildValue("N(N)", iter, list);
4315
0
}