/src/cpython/Objects/listobject.c
Line | Count | Source |
1 | | /* List object implementation */ |
2 | | |
3 | | #include "Python.h" |
4 | | #include "pycore_abstract.h" // _PyIndex_Check() |
5 | | #include "pycore_ceval.h" // _PyEval_GetBuiltin() |
6 | | #include "pycore_critical_section.h" // _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED() |
7 | | #include "pycore_dict.h" // _PyDictViewObject |
8 | | #include "pycore_freelist.h" // _Py_FREELIST_FREE(), _Py_FREELIST_POP() |
9 | | #include "pycore_interp.h" // PyInterpreterState.list |
10 | | #include "pycore_list.h" // struct _Py_list_freelist, _PyListIterObject |
11 | | #include "pycore_long.h" // _PyLong_DigitCount |
12 | | #include "pycore_modsupport.h" // _PyArg_NoKwnames() |
13 | | #include "pycore_object.h" // _PyObject_GC_TRACK(), _PyDebugAllocatorStats() |
14 | | #include "pycore_pyatomic_ft_wrappers.h" |
15 | | #include "pycore_setobject.h" // _PySet_NextEntry() |
16 | | #include "pycore_stackref.h" // _Py_TryIncrefCompareStackRef() |
17 | | #include "pycore_tuple.h" // _PyTuple_FromArraySteal() |
18 | | #include "pycore_typeobject.h" // _Py_TYPE_VERSION_LIST |
19 | | #include <stddef.h> |
20 | | |
21 | | /*[clinic input] |
22 | | class list "PyListObject *" "&PyList_Type" |
23 | | [clinic start generated code]*/ |
24 | | /*[clinic end generated code: output=da39a3ee5e6b4b0d input=f9b222678f9f71e0]*/ |
25 | | |
26 | | #include "clinic/listobject.c.h" |
27 | | |
28 | | _Py_DECLARE_STR(list_err, "list index out of range"); |
29 | | |
30 | | #ifdef Py_GIL_DISABLED |
31 | | typedef struct { |
32 | | Py_ssize_t allocated; |
33 | | PyObject *ob_item[]; |
34 | | } _PyListArray; |
35 | | |
36 | | static _PyListArray * |
37 | | list_allocate_array(size_t capacity) |
38 | | { |
39 | | if (capacity > PY_SSIZE_T_MAX/sizeof(PyObject*) - 1) { |
40 | | return NULL; |
41 | | } |
42 | | _PyListArray *array = PyMem_Malloc(sizeof(_PyListArray) + capacity * sizeof(PyObject *)); |
43 | | if (array == NULL) { |
44 | | return NULL; |
45 | | } |
46 | | array->allocated = capacity; |
47 | | return array; |
48 | | } |
49 | | |
50 | | static Py_ssize_t |
51 | | list_capacity(PyObject **items) |
52 | | { |
53 | | _PyListArray *array = _Py_CONTAINER_OF(items, _PyListArray, ob_item); |
54 | | return array->allocated; |
55 | | } |
56 | | #endif |
57 | | |
58 | | static void |
59 | | free_list_items(PyObject** items, bool use_qsbr) |
60 | 148M | { |
61 | | #ifdef Py_GIL_DISABLED |
62 | | _PyListArray *array = _Py_CONTAINER_OF(items, _PyListArray, ob_item); |
63 | | if (use_qsbr) { |
64 | | size_t size = sizeof(_PyListArray) + array->allocated * sizeof(PyObject *); |
65 | | _PyMem_FreeDelayed(array, size); |
66 | | } |
67 | | else { |
68 | | PyMem_Free(array); |
69 | | } |
70 | | #else |
71 | 148M | PyMem_Free(items); |
72 | 148M | #endif |
73 | 148M | } |
74 | | |
75 | | static void |
76 | | ensure_shared_on_resize(PyListObject *self) |
77 | 111M | { |
78 | | #ifdef Py_GIL_DISABLED |
79 | | // We can't use _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED here because |
80 | | // the `CALL_LIST_APPEND` bytecode handler may lock the list without |
81 | | // a critical section. |
82 | | assert(Py_REFCNT(self) == 1 || PyMutex_IsLocked(&_PyObject_CAST(self)->ob_mutex)); |
83 | | |
84 | | // Ensure that the list array is freed using QSBR if we are not the |
85 | | // owning thread. |
86 | | if (!_Py_IsOwnedByCurrentThread((PyObject *)self) && |
87 | | !_PyObject_GC_IS_SHARED(self)) |
88 | | { |
89 | | _PyObject_GC_SET_SHARED(self); |
90 | | } |
91 | | #endif |
92 | 111M | } |
93 | | |
94 | | /* Ensure ob_item has room for at least newsize elements, and set |
95 | | * ob_size to newsize. If newsize > ob_size on entry, the content |
96 | | * of the new slots at exit is undefined heap trash; it's the caller's |
97 | | * responsibility to overwrite them with sane values. |
98 | | * The number of allocated elements may grow, shrink, or stay the same. |
99 | | * Failure is impossible if newsize <= self.allocated on entry. |
100 | | * Note that self->ob_item may change, even if newsize is less |
101 | | * than ob_size on entry. |
102 | | */ |
103 | | static int |
104 | | list_resize(PyListObject *self, Py_ssize_t newsize) |
105 | 129M | { |
106 | 129M | size_t new_allocated, target_bytes; |
107 | 129M | Py_ssize_t allocated = self->allocated; |
108 | | |
109 | | /* Bypass realloc() when a previous overallocation is large enough |
110 | | to accommodate the newsize. If the newsize falls lower than half |
111 | | the allocated size, then proceed with the realloc() to shrink the list. |
112 | | */ |
113 | 129M | if (allocated >= newsize && newsize >= (allocated >> 1)) { |
114 | 18.0M | assert(self->ob_item != NULL || newsize == 0); |
115 | 18.0M | Py_SET_SIZE(self, newsize); |
116 | 18.0M | return 0; |
117 | 18.0M | } |
118 | | |
119 | | /* This over-allocates proportional to the list size, making room |
120 | | * for additional growth. The over-allocation is mild, but is |
121 | | * enough to give linear-time amortized behavior over a long |
122 | | * sequence of appends() in the presence of a poorly-performing |
123 | | * system realloc(). |
124 | | * Add padding to make the allocated size multiple of 4. |
125 | | * The growth pattern is: 0, 4, 8, 16, 24, 32, 40, 52, 64, 76, ... |
126 | | * Note: new_allocated won't overflow because the largest possible value |
127 | | * is PY_SSIZE_T_MAX * (9 / 8) + 6 which always fits in a size_t. |
128 | | */ |
129 | 111M | new_allocated = ((size_t)newsize + (newsize >> 3) + 6) & ~(size_t)3; |
130 | | /* Do not overallocate if the new size is closer to the overallocated size |
131 | | * than to the old size. |
132 | | */ |
133 | 111M | if (newsize - Py_SIZE(self) > (Py_ssize_t)(new_allocated - newsize)) |
134 | 96.9k | new_allocated = ((size_t)newsize + 3) & ~(size_t)3; |
135 | | |
136 | 111M | if (newsize == 0) |
137 | 11.4k | new_allocated = 0; |
138 | | |
139 | 111M | ensure_shared_on_resize(self); |
140 | | |
141 | | #ifdef Py_GIL_DISABLED |
142 | | _PyListArray *array = list_allocate_array(new_allocated); |
143 | | if (array == NULL) { |
144 | | if (newsize < allocated) { |
145 | | // Never fail when shrinking allocations |
146 | | Py_SET_SIZE(self, newsize); |
147 | | return 0; |
148 | | } |
149 | | PyErr_NoMemory(); |
150 | | return -1; |
151 | | } |
152 | | PyObject **old_items = self->ob_item; |
153 | | if (self->ob_item) { |
154 | | if (new_allocated < (size_t)allocated) { |
155 | | target_bytes = new_allocated * sizeof(PyObject*); |
156 | | } |
157 | | else { |
158 | | target_bytes = allocated * sizeof(PyObject*); |
159 | | } |
160 | | memcpy(array->ob_item, self->ob_item, target_bytes); |
161 | | } |
162 | | if (new_allocated > (size_t)allocated) { |
163 | | memset(array->ob_item + allocated, 0, sizeof(PyObject *) * (new_allocated - allocated)); |
164 | | } |
165 | | _Py_atomic_store_ptr_release(&self->ob_item, &array->ob_item); |
166 | | self->allocated = new_allocated; |
167 | | Py_SET_SIZE(self, newsize); |
168 | | if (old_items != NULL) { |
169 | | free_list_items(old_items, _PyObject_GC_IS_SHARED(self)); |
170 | | } |
171 | | #else |
172 | 111M | PyObject **items; |
173 | 111M | if (new_allocated <= (size_t)PY_SSIZE_T_MAX / sizeof(PyObject *)) { |
174 | 111M | target_bytes = new_allocated * sizeof(PyObject *); |
175 | 111M | items = (PyObject **)PyMem_Realloc(self->ob_item, target_bytes); |
176 | 111M | } |
177 | 0 | else { |
178 | | // integer overflow |
179 | 0 | items = NULL; |
180 | 0 | } |
181 | 111M | if (items == NULL) { |
182 | 0 | if (newsize < allocated) { |
183 | | // Never fail when shrinking allocations |
184 | 0 | Py_SET_SIZE(self, newsize); |
185 | 0 | return 0; |
186 | 0 | } |
187 | 0 | PyErr_NoMemory(); |
188 | 0 | return -1; |
189 | 0 | } |
190 | 111M | self->ob_item = items; |
191 | 111M | Py_SET_SIZE(self, newsize); |
192 | 111M | self->allocated = new_allocated; |
193 | 111M | #endif |
194 | 111M | return 0; |
195 | 111M | } |
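The growth pattern quoted in the comment above (0, 4, 8, 16, 24, 32, 40, 52, 64, 76, ...) can be reproduced by simulating repeated appends through the new_allocated formula. The sketch below is a standalone simplification covering only the growth path (the shrink, "do not overallocate", and newsize == 0 branches are left out); grow() is a hypothetical helper, not a CPython function:

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified restatement of the growth rule used by list_resize() on the
       append path, where newsize exceeds the current allocation. */
    static size_t grow(size_t newsize)
    {
        return (newsize + (newsize >> 3) + 6) & ~(size_t)3;
    }

    int main(void)
    {
        size_t size = 0, allocated = 0;
        while (size < 80) {
            size++;                       /* append one element */
            if (size > allocated) {
                allocated = grow(size);
                printf("size %zu -> allocated %zu\n", size, allocated);
            }
        }
        /* prints capacities 4, 8, 16, 24, 32, 40, 52, 64, 76, 92
           (the leading 0 in the documented pattern is the empty list) */
        return 0;
    }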
196 | | |
197 | | static int |
198 | | list_preallocate_exact(PyListObject *self, Py_ssize_t size) |
199 | 6.59M | { |
200 | 6.59M | PyObject **items; |
201 | 6.59M | assert(self->ob_item == NULL); |
202 | 6.59M | assert(size > 0); |
203 | | |
204 | | /* Since the Python memory allocator has granularity of 16 bytes on 64-bit |
205 | | * platforms (8 on 32-bit), there is no benefit in allocating space for |
206 | | * an odd number of items, and there is no drawback in rounding the |
207 | | * allocated size up to the nearest even number. |
208 | | */ |
209 | 6.59M | size = (size + 1) & ~(size_t)1; |
210 | | #ifdef Py_GIL_DISABLED |
211 | | _PyListArray *array = list_allocate_array(size); |
212 | | if (array == NULL) { |
213 | | PyErr_NoMemory(); |
214 | | return -1; |
215 | | } |
216 | | items = array->ob_item; |
217 | | memset(items, 0, size * sizeof(PyObject *)); |
218 | | #else |
219 | 6.59M | items = PyMem_New(PyObject*, size); |
220 | 6.59M | if (items == NULL) { |
221 | 0 | PyErr_NoMemory(); |
222 | 0 | return -1; |
223 | 0 | } |
224 | 6.59M | #endif |
225 | 6.59M | FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item, items); |
226 | 6.59M | self->allocated = size; |
227 | 6.59M | return 0; |
228 | 6.59M | } |
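The rounding step (size + 1) & ~1 leaves even sizes unchanged and bumps odd sizes to the next even number; a tiny standalone check of just that expression:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        for (size_t size = 1; size <= 8; size++) {
            size_t rounded = (size + 1) & ~(size_t)1;
            printf("%zu -> %zu\n", size, rounded);   /* 1->2, 2->2, 3->4, ... 7->8, 8->8 */
        }
        return 0;
    }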
229 | | |
230 | | /* Print summary info about the state of the optimized allocator */ |
231 | | void |
232 | | _PyList_DebugMallocStats(FILE *out) |
233 | 0 | { |
234 | 0 | _PyDebugAllocatorStats(out, |
235 | 0 | "free PyListObject", |
236 | 0 | _Py_FREELIST_SIZE(lists), |
237 | 0 | sizeof(PyListObject)); |
238 | 0 | } |
239 | | |
240 | | PyObject * |
241 | | PyList_New(Py_ssize_t size) |
242 | 310M | { |
243 | 310M | if (size < 0) { |
244 | 0 | PyErr_BadInternalCall(); |
245 | 0 | return NULL; |
246 | 0 | } |
247 | | |
248 | 310M | PyListObject *op = _Py_FREELIST_POP(PyListObject, lists); |
249 | 310M | if (op == NULL) { |
250 | 48.9M | op = PyObject_GC_New(PyListObject, &PyList_Type); |
251 | 48.9M | if (op == NULL) { |
252 | 0 | return NULL; |
253 | 0 | } |
254 | 48.9M | } |
255 | 310M | if (size <= 0) { |
256 | 261M | op->ob_item = NULL; |
257 | 261M | } |
258 | 48.3M | else { |
259 | | #ifdef Py_GIL_DISABLED |
260 | | _PyListArray *array = list_allocate_array(size); |
261 | | if (array == NULL) { |
262 | | Py_DECREF(op); |
263 | | return PyErr_NoMemory(); |
264 | | } |
265 | | memset(&array->ob_item, 0, size * sizeof(PyObject *)); |
266 | | op->ob_item = array->ob_item; |
267 | | #else |
268 | 48.3M | op->ob_item = (PyObject **) PyMem_Calloc(size, sizeof(PyObject *)); |
269 | 48.3M | #endif |
270 | 48.3M | if (op->ob_item == NULL) { |
271 | 0 | Py_DECREF(op); |
272 | 0 | return PyErr_NoMemory(); |
273 | 0 | } |
274 | 48.3M | } |
275 | 310M | Py_SET_SIZE(op, size); |
276 | 310M | op->allocated = size; |
277 | 310M | _PyObject_GC_TRACK(op); |
278 | 310M | return (PyObject *) op; |
279 | 310M | } |
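For code outside this file, the practical contract is that PyList_New(n) returns a list whose n slots are NULL, and every slot must be filled before the list is exposed to other code. A minimal embedding sketch of that usage, assuming a normal CPython build with Python.h and libpython available (not part of listobject.c):

    #include <Python.h>

    int main(void)
    {
        Py_Initialize();
        PyObject *list = PyList_New(3);            /* three NULL slots, ob_size == 3 */
        if (list == NULL) {
            Py_Finalize();
            return 1;
        }
        for (Py_ssize_t i = 0; i < 3; i++) {
            PyObject *num = PyLong_FromSsize_t(i * 10);
            if (num == NULL) {
                Py_DECREF(list);
                Py_Finalize();
                return 1;
            }
            PyList_SET_ITEM(list, i, num);         /* steals the reference to num */
        }
        PyObject_Print(list, stdout, 0);           /* [0, 10, 20] */
        printf("\n");
        Py_DECREF(list);
        Py_Finalize();
        return 0;
    }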
280 | | |
281 | | static PyObject * |
282 | | list_new_prealloc(Py_ssize_t size) |
283 | 17.7M | { |
284 | 17.7M | assert(size > 0); |
285 | 17.7M | PyListObject *op = (PyListObject *) PyList_New(0); |
286 | 17.7M | if (op == NULL) { |
287 | 0 | return NULL; |
288 | 0 | } |
289 | 17.7M | assert(op->ob_item == NULL); |
290 | | #ifdef Py_GIL_DISABLED |
291 | | _PyListArray *array = list_allocate_array(size); |
292 | | if (array == NULL) { |
293 | | Py_DECREF(op); |
294 | | return PyErr_NoMemory(); |
295 | | } |
296 | | op->ob_item = array->ob_item; |
297 | | #else |
298 | 17.7M | op->ob_item = PyMem_New(PyObject *, size); |
299 | 17.7M | if (op->ob_item == NULL) { |
300 | 0 | Py_DECREF(op); |
301 | 0 | return PyErr_NoMemory(); |
302 | 0 | } |
303 | 17.7M | #endif |
304 | 17.7M | op->allocated = size; |
305 | 17.7M | return (PyObject *) op; |
306 | 17.7M | } |
307 | | |
308 | | Py_ssize_t |
309 | | PyList_Size(PyObject *op) |
310 | 101k | { |
311 | 101k | if (!PyList_Check(op)) { |
312 | 0 | PyErr_BadInternalCall(); |
313 | 0 | return -1; |
314 | 0 | } |
315 | 101k | else { |
316 | 101k | return PyList_GET_SIZE(op); |
317 | 101k | } |
318 | 101k | } |
319 | | |
320 | | static inline int |
321 | | valid_index(Py_ssize_t i, Py_ssize_t limit) |
322 | 248M | { |
323 | | /* The cast to size_t lets us use just a single comparison |
324 | | to check whether i is in the range: 0 <= i < limit. |
325 | | |
326 | | See: Section 14.2 "Bounds Checking" in the Agner Fog |
327 | | optimization manual found at: |
328 | | https://www.agner.org/optimize/optimizing_cpp.pdf |
329 | | */ |
330 | 248M | return (size_t) i < (size_t) limit; |
331 | 248M | } |
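The bounds-check trick described in the comment can be verified in isolation: casting a negative index to size_t yields a huge unsigned value, so one unsigned comparison rejects both i < 0 and i >= limit. A standalone sketch (valid_index_demo is a made-up name, and ptrdiff_t stands in for Py_ssize_t):

    #include <stdio.h>
    #include <stddef.h>

    static int valid_index_demo(ptrdiff_t i, ptrdiff_t limit)
    {
        return (size_t)i < (size_t)limit;   /* one branch covers both bounds */
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               valid_index_demo(-1, 5),     /* 0: negative wraps to a huge value */
               valid_index_demo(0, 5),      /* 1 */
               valid_index_demo(4, 5),      /* 1 */
               valid_index_demo(5, 5));     /* 0: i == limit is out of range */
        return 0;
    }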
332 | | |
333 | | #ifdef Py_GIL_DISABLED |
334 | | |
335 | | static PyObject * |
336 | | list_item_impl(PyListObject *self, Py_ssize_t idx) |
337 | | { |
338 | | PyObject *item = NULL; |
339 | | Py_BEGIN_CRITICAL_SECTION(self); |
340 | | if (!_PyObject_GC_IS_SHARED(self)) { |
341 | | _PyObject_GC_SET_SHARED(self); |
342 | | } |
343 | | Py_ssize_t size = Py_SIZE(self); |
344 | | if (!valid_index(idx, size)) { |
345 | | goto exit; |
346 | | } |
347 | | item = _Py_NewRefWithLock(self->ob_item[idx]); |
348 | | exit: |
349 | | Py_END_CRITICAL_SECTION(); |
350 | | return item; |
351 | | } |
352 | | |
353 | | static inline PyObject* |
354 | | list_get_item_ref(PyListObject *op, Py_ssize_t i) |
355 | | { |
356 | | if (!_Py_IsOwnedByCurrentThread((PyObject *)op) && !_PyObject_GC_IS_SHARED(op)) { |
357 | | return list_item_impl(op, i); |
358 | | } |
359 | | // Need an atomic operation for getting the size. |
360 | | Py_ssize_t size = PyList_GET_SIZE(op); |
361 | | if (!valid_index(i, size)) { |
362 | | return NULL; |
363 | | } |
364 | | PyObject **ob_item = _Py_atomic_load_ptr(&op->ob_item); |
365 | | if (ob_item == NULL) { |
366 | | return NULL; |
367 | | } |
368 | | Py_ssize_t cap = list_capacity(ob_item); |
369 | | assert(cap != -1); |
370 | | if (!valid_index(i, cap)) { |
371 | | return NULL; |
372 | | } |
373 | | PyObject *item = _Py_TryXGetRef(&ob_item[i]); |
374 | | if (item == NULL) { |
375 | | return list_item_impl(op, i); |
376 | | } |
377 | | return item; |
378 | | } |
379 | | #else |
380 | | static inline PyObject* |
381 | | list_get_item_ref(PyListObject *op, Py_ssize_t i) |
382 | 183M | { |
383 | 183M | if (!valid_index(i, Py_SIZE(op))) { |
384 | 31.0M | return NULL; |
385 | 31.0M | } |
386 | 152M | return Py_NewRef(PyList_GET_ITEM(op, i)); |
387 | 183M | } |
388 | | #endif |
389 | | |
390 | | PyObject * |
391 | | PyList_GetItem(PyObject *op, Py_ssize_t i) |
392 | 688 | { |
393 | 688 | if (!PyList_Check(op)) { |
394 | 0 | PyErr_BadInternalCall(); |
395 | 0 | return NULL; |
396 | 0 | } |
397 | 688 | if (!valid_index(i, Py_SIZE(op))) { |
398 | 0 | _Py_DECLARE_STR(list_err, "list index out of range"); |
399 | 0 | PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err)); |
400 | 0 | return NULL; |
401 | 0 | } |
402 | 688 | return ((PyListObject *)op) -> ob_item[i]; |
403 | 688 | } |
404 | | |
405 | | PyObject * |
406 | | PyList_GetItemRef(PyObject *op, Py_ssize_t i) |
407 | 92.8k | { |
408 | 92.8k | if (!PyList_Check(op)) { |
409 | 0 | PyErr_SetString(PyExc_TypeError, "expected a list"); |
410 | 0 | return NULL; |
411 | 0 | } |
412 | 92.8k | PyObject *item = list_get_item_ref((PyListObject *)op, i); |
413 | 92.8k | if (item == NULL) { |
414 | 0 | _Py_DECLARE_STR(list_err, "list index out of range"); |
415 | 0 | PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err)); |
416 | 0 | return NULL; |
417 | 0 | } |
418 | 92.8k | return item; |
419 | 92.8k | } |
420 | | |
421 | | PyObject * |
422 | | _PyList_GetItemRef(PyListObject *list, Py_ssize_t i) |
423 | 1.48M | { |
424 | 1.48M | return list_get_item_ref(list, i); |
425 | 1.48M | } |
426 | | |
427 | | #ifdef Py_GIL_DISABLED |
428 | | int |
429 | | _PyList_GetItemRefNoLock(PyListObject *list, Py_ssize_t i, _PyStackRef *result) |
430 | | { |
431 | | assert(_Py_IsOwnedByCurrentThread((PyObject *)list) || |
432 | | _PyObject_GC_IS_SHARED(list)); |
433 | | if (!valid_index(i, PyList_GET_SIZE(list))) { |
434 | | return 0; |
435 | | } |
436 | | PyObject **ob_item = _Py_atomic_load_ptr(&list->ob_item); |
437 | | if (ob_item == NULL) { |
438 | | return 0; |
439 | | } |
440 | | Py_ssize_t cap = list_capacity(ob_item); |
441 | | assert(cap != -1); |
442 | | if (!valid_index(i, cap)) { |
443 | | return 0; |
444 | | } |
445 | | PyObject *obj = _Py_atomic_load_ptr(&ob_item[i]); |
446 | | if (obj == NULL || !_Py_TryIncrefCompareStackRef(&ob_item[i], obj, result)) { |
447 | | return -1; |
448 | | } |
449 | | return 1; |
450 | | } |
451 | | #endif |
452 | | |
453 | | int |
454 | | PyList_SetItem(PyObject *op, Py_ssize_t i, |
455 | | PyObject *newitem) |
456 | 26.5k | { |
457 | 26.5k | if (!PyList_Check(op)) { |
458 | 0 | Py_XDECREF(newitem); |
459 | 0 | PyErr_BadInternalCall(); |
460 | 0 | return -1; |
461 | 0 | } |
462 | 26.5k | int ret; |
463 | 26.5k | PyListObject *self = ((PyListObject *)op); |
464 | 26.5k | Py_BEGIN_CRITICAL_SECTION(self); |
465 | 26.5k | if (!valid_index(i, Py_SIZE(self))) { |
466 | 0 | Py_XDECREF(newitem); |
467 | 0 | PyErr_SetString(PyExc_IndexError, |
468 | 0 | "list assignment index out of range"); |
469 | 0 | ret = -1; |
470 | 0 | goto end; |
471 | 0 | } |
472 | 26.5k | PyObject *tmp = self->ob_item[i]; |
473 | 26.5k | FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item[i], newitem); |
474 | 26.5k | Py_XDECREF(tmp); |
475 | 26.5k | ret = 0; |
476 | 26.5k | end:; |
477 | 26.5k | Py_END_CRITICAL_SECTION(); |
478 | 26.5k | return ret; |
479 | 26.5k | } |
480 | | |
481 | | static int |
482 | | ins1(PyListObject *self, Py_ssize_t where, PyObject *v) |
483 | 1.42k | { |
484 | 1.42k | Py_ssize_t i, n = Py_SIZE(self); |
485 | 1.42k | PyObject **items; |
486 | 1.42k | if (v == NULL) { |
487 | 0 | PyErr_BadInternalCall(); |
488 | 0 | return -1; |
489 | 0 | } |
490 | | |
491 | 1.42k | assert((size_t)n + 1 < PY_SSIZE_T_MAX); |
492 | 1.42k | if (list_resize(self, n+1) < 0) |
493 | 0 | return -1; |
494 | | |
495 | 1.42k | if (where < 0) { |
496 | 0 | where += n; |
497 | 0 | if (where < 0) |
498 | 0 | where = 0; |
499 | 0 | } |
500 | 1.42k | if (where > n) |
501 | 0 | where = n; |
502 | 1.42k | items = self->ob_item; |
503 | 10.2k | for (i = n; --i >= where; ) |
504 | 8.85k | FT_ATOMIC_STORE_PTR_RELEASE(items[i+1], items[i]); |
505 | 1.42k | FT_ATOMIC_STORE_PTR_RELEASE(items[where], Py_NewRef(v)); |
506 | 1.42k | return 0; |
507 | 1.42k | } |
508 | | |
509 | | int |
510 | | PyList_Insert(PyObject *op, Py_ssize_t where, PyObject *newitem) |
511 | 30 | { |
512 | 30 | if (!PyList_Check(op)) { |
513 | 0 | PyErr_BadInternalCall(); |
514 | 0 | return -1; |
515 | 0 | } |
516 | 30 | PyListObject *self = (PyListObject *)op; |
517 | 30 | int err; |
518 | 30 | Py_BEGIN_CRITICAL_SECTION(self); |
519 | 30 | err = ins1(self, where, newitem); |
520 | 30 | Py_END_CRITICAL_SECTION(); |
521 | 30 | return err; |
522 | 30 | } |
523 | | |
524 | | /* internal, used by _PyList_AppendTakeRef */ |
525 | | int |
526 | | _PyList_AppendTakeRefListResize(PyListObject *self, PyObject *newitem) |
527 | 84.9M | { |
528 | 84.9M | Py_ssize_t len = Py_SIZE(self); |
529 | 84.9M | assert(self->allocated == -1 || self->allocated == len); |
530 | 84.9M | if (list_resize(self, len + 1) < 0) { |
531 | 0 | Py_DECREF(newitem); |
532 | 0 | return -1; |
533 | 0 | } |
534 | 84.9M | FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item[len], newitem); |
535 | 84.9M | return 0; |
536 | 84.9M | } |
537 | | |
538 | | int |
539 | | PyList_Append(PyObject *op, PyObject *newitem) |
540 | 181M | { |
541 | 181M | if (PyList_Check(op) && (newitem != NULL)) { |
542 | 181M | int ret; |
543 | 181M | Py_BEGIN_CRITICAL_SECTION(op); |
544 | 181M | ret = _PyList_AppendTakeRef((PyListObject *)op, Py_NewRef(newitem)); |
545 | 181M | Py_END_CRITICAL_SECTION(); |
546 | 181M | return ret; |
547 | 181M | } |
548 | 0 | PyErr_BadInternalCall(); |
549 | 0 | return -1; |
550 | 181M | } |
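Unlike PyList_SET_ITEM, PyList_Append does not steal its argument: as shown above it wraps the item in Py_NewRef before handing it to _PyList_AppendTakeRef, so the caller keeps, and must eventually release, its own reference. A hedged usage sketch under the same embedding assumptions as the earlier example; append_squares is a made-up helper:

    #include <Python.h>

    static int append_squares(PyObject *list, Py_ssize_t n)
    {
        for (Py_ssize_t i = 0; i < n; i++) {
            PyObject *sq = PyLong_FromSsize_t(i * i);
            if (sq == NULL) {
                return -1;
            }
            int rc = PyList_Append(list, sq);   /* the list takes its own reference */
            Py_DECREF(sq);                      /* drop ours either way */
            if (rc < 0) {
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        Py_Initialize();
        PyObject *list = PyList_New(0);
        if (list == NULL || append_squares(list, 5) < 0) {
            Py_XDECREF(list);
            Py_Finalize();
            return 1;
        }
        PyObject_Print(list, stdout, 0);        /* [0, 1, 4, 9, 16] */
        printf("\n");
        Py_DECREF(list);
        Py_Finalize();
        return 0;
    }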
551 | | |
552 | | /* Methods */ |
553 | | |
554 | | static void |
555 | | list_dealloc(PyObject *self) |
556 | 331M | { |
557 | 331M | PyListObject *op = (PyListObject *)self; |
558 | 331M | Py_ssize_t i; |
559 | 331M | PyObject_GC_UnTrack(op); |
560 | 331M | if (op->ob_item != NULL) { |
561 | | /* Do it backwards, for Christian Tismer. |
562 | | There's a simple test case where somehow this reduces |
563 | | thrashing when a *very* large list is created and |
564 | | immediately deleted. */ |
565 | 132M | i = Py_SIZE(op); |
566 | 1.75G | while (--i >= 0) { |
567 | 1.62G | Py_XDECREF(op->ob_item[i]); |
568 | 1.62G | } |
569 | 132M | free_list_items(op->ob_item, false); |
570 | 132M | op->ob_item = NULL; |
571 | 132M | } |
572 | 331M | if (PyList_CheckExact(op)) { |
573 | 320M | _Py_FREELIST_FREE(lists, op, PyObject_GC_Del); |
574 | 320M | } |
575 | 11.1M | else { |
576 | 11.1M | PyObject_GC_Del(op); |
577 | 11.1M | } |
578 | 331M | } |
579 | | |
580 | | static PyObject * |
581 | | list_repr_impl(PyListObject *v) |
582 | 4.01M | { |
583 | 4.01M | int res = Py_ReprEnter((PyObject*)v); |
584 | 4.01M | if (res != 0) { |
585 | 0 | return (res > 0 ? PyUnicode_FromString("[...]") : NULL); |
586 | 0 | } |
587 | | |
588 | | /* "[" + "1" + ", 2" * (len - 1) + "]" */ |
589 | 4.01M | Py_ssize_t prealloc = 1 + 1 + (2 + 1) * (Py_SIZE(v) - 1) + 1; |
590 | 4.01M | PyUnicodeWriter *writer = PyUnicodeWriter_Create(prealloc); |
591 | 4.01M | PyObject *item = NULL; |
592 | 4.01M | if (writer == NULL) { |
593 | 0 | goto error; |
594 | 0 | } |
595 | | |
596 | 4.01M | if (PyUnicodeWriter_WriteChar(writer, '[') < 0) { |
597 | 0 | goto error; |
598 | 0 | } |
599 | | |
600 | | /* Do repr() on each element. Note that this may mutate the list, |
601 | | so must refetch the list size on each iteration. */ |
602 | 11.5M | for (Py_ssize_t i = 0; i < Py_SIZE(v); ++i) { |
603 | | /* Hold a strong reference since repr(item) can mutate the list */ |
604 | 7.52M | item = Py_NewRef(v->ob_item[i]); |
605 | | |
606 | 7.52M | if (i > 0) { |
607 | 3.51M | if (PyUnicodeWriter_WriteChar(writer, ',') < 0) { |
608 | 0 | goto error; |
609 | 0 | } |
610 | 3.51M | if (PyUnicodeWriter_WriteChar(writer, ' ') < 0) { |
611 | 0 | goto error; |
612 | 0 | } |
613 | 3.51M | } |
614 | | |
615 | 7.52M | if (PyUnicodeWriter_WriteRepr(writer, item) < 0) { |
616 | 0 | goto error; |
617 | 0 | } |
618 | 7.52M | Py_CLEAR(item); |
619 | 7.52M | } |
620 | | |
621 | 4.01M | if (PyUnicodeWriter_WriteChar(writer, ']') < 0) { |
622 | 0 | goto error; |
623 | 0 | } |
624 | | |
625 | 4.01M | Py_ReprLeave((PyObject *)v); |
626 | 4.01M | return PyUnicodeWriter_Finish(writer); |
627 | | |
628 | 0 | error: |
629 | 0 | Py_XDECREF(item); |
630 | 0 | PyUnicodeWriter_Discard(writer); |
631 | 0 | Py_ReprLeave((PyObject *)v); |
632 | 0 | return NULL; |
633 | 4.01M | } |
634 | | |
635 | | static PyObject * |
636 | | list_repr(PyObject *self) |
637 | 4.01M | { |
638 | 4.01M | if (PyList_GET_SIZE(self) == 0) { |
639 | 4.32k | return PyUnicode_FromString("[]"); |
640 | 4.32k | } |
641 | 4.01M | PyListObject *v = (PyListObject *)self; |
642 | 4.01M | PyObject *ret = NULL; |
643 | 4.01M | Py_BEGIN_CRITICAL_SECTION(v); |
644 | 4.01M | ret = list_repr_impl(v); |
645 | 4.01M | Py_END_CRITICAL_SECTION(); |
646 | 4.01M | return ret; |
647 | 4.01M | } |
648 | | |
649 | | static Py_ssize_t |
650 | | list_length(PyObject *a) |
651 | 77.1M | { |
652 | 77.1M | return PyList_GET_SIZE(a); |
653 | 77.1M | } |
654 | | |
655 | | static int |
656 | | list_contains(PyObject *aa, PyObject *el) |
657 | 8.64k | { |
658 | | |
659 | 43.8k | for (Py_ssize_t i = 0; ; i++) { |
660 | 43.8k | PyObject *item = list_get_item_ref((PyListObject *)aa, i); |
661 | 43.8k | if (item == NULL) { |
662 | | // out-of-bounds |
663 | 7.57k | return 0; |
664 | 7.57k | } |
665 | 36.2k | int cmp = PyObject_RichCompareBool(item, el, Py_EQ); |
666 | 36.2k | Py_DECREF(item); |
667 | 36.2k | if (cmp != 0) { |
668 | 1.06k | return cmp; |
669 | 1.06k | } |
670 | 36.2k | } |
671 | 0 | return 0; |
672 | 8.64k | } |
673 | | |
674 | | static PyObject * |
675 | | list_item(PyObject *aa, Py_ssize_t i) |
676 | 23.8M | { |
677 | 23.8M | PyListObject *a = (PyListObject *)aa; |
678 | 23.8M | if (!valid_index(i, PyList_GET_SIZE(a))) { |
679 | 2.29M | PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err)); |
680 | 2.29M | return NULL; |
681 | 2.29M | } |
682 | 21.5M | PyObject *item; |
683 | | #ifdef Py_GIL_DISABLED |
684 | | item = list_get_item_ref(a, i); |
685 | | if (item == NULL) { |
686 | | PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err)); |
687 | | return NULL; |
688 | | } |
689 | | #else |
690 | 21.5M | item = Py_NewRef(a->ob_item[i]); |
691 | 21.5M | #endif |
692 | 21.5M | return item; |
693 | 23.8M | } |
694 | | |
695 | | static PyObject * |
696 | | list_slice_lock_held(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh) |
697 | 4.39M | { |
698 | 4.39M | PyListObject *np; |
699 | 4.39M | PyObject **src, **dest; |
700 | 4.39M | Py_ssize_t i, len; |
701 | 4.39M | len = ihigh - ilow; |
702 | 4.39M | if (len <= 0) { |
703 | 0 | return PyList_New(0); |
704 | 0 | } |
705 | 4.39M | np = (PyListObject *) list_new_prealloc(len); |
706 | 4.39M | if (np == NULL) |
707 | 0 | return NULL; |
708 | | |
709 | 4.39M | src = a->ob_item + ilow; |
710 | 4.39M | dest = np->ob_item; |
711 | 32.0M | for (i = 0; i < len; i++) { |
712 | 27.6M | PyObject *v = src[i]; |
713 | 27.6M | dest[i] = Py_NewRef(v); |
714 | 27.6M | } |
715 | 4.39M | Py_SET_SIZE(np, len); |
716 | 4.39M | return (PyObject *)np; |
717 | 4.39M | } |
718 | | |
719 | | PyObject * |
720 | | PyList_GetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh) |
721 | 0 | { |
722 | 0 | if (!PyList_Check(a)) { |
723 | 0 | PyErr_BadInternalCall(); |
724 | 0 | return NULL; |
725 | 0 | } |
726 | 0 | PyObject *ret; |
727 | 0 | Py_BEGIN_CRITICAL_SECTION(a); |
728 | 0 | if (ilow < 0) { |
729 | 0 | ilow = 0; |
730 | 0 | } |
731 | 0 | else if (ilow > Py_SIZE(a)) { |
732 | 0 | ilow = Py_SIZE(a); |
733 | 0 | } |
734 | 0 | if (ihigh < ilow) { |
735 | 0 | ihigh = ilow; |
736 | 0 | } |
737 | 0 | else if (ihigh > Py_SIZE(a)) { |
738 | 0 | ihigh = Py_SIZE(a); |
739 | 0 | } |
740 | 0 | ret = list_slice_lock_held((PyListObject *)a, ilow, ihigh); |
741 | 0 | Py_END_CRITICAL_SECTION(); |
742 | 0 | return ret; |
743 | 0 | } |
744 | | |
745 | | static PyObject * |
746 | | list_concat_lock_held(PyListObject *a, PyListObject *b) |
747 | 20.8M | { |
748 | 20.8M | Py_ssize_t size; |
749 | 20.8M | Py_ssize_t i; |
750 | 20.8M | PyObject **src, **dest; |
751 | 20.8M | PyListObject *np; |
752 | 20.8M | assert((size_t)Py_SIZE(a) + (size_t)Py_SIZE(b) < PY_SSIZE_T_MAX); |
753 | 20.8M | size = Py_SIZE(a) + Py_SIZE(b); |
754 | 20.8M | if (size == 0) { |
755 | 7.52M | return PyList_New(0); |
756 | 7.52M | } |
757 | 13.2M | np = (PyListObject *) list_new_prealloc(size); |
758 | 13.2M | if (np == NULL) { |
759 | 0 | return NULL; |
760 | 0 | } |
761 | 13.2M | src = a->ob_item; |
762 | 13.2M | dest = np->ob_item; |
763 | 504M | for (i = 0; i < Py_SIZE(a); i++) { |
764 | 491M | PyObject *v = src[i]; |
765 | 491M | dest[i] = Py_NewRef(v); |
766 | 491M | } |
767 | 13.2M | src = b->ob_item; |
768 | 13.2M | dest = np->ob_item + Py_SIZE(a); |
769 | 244M | for (i = 0; i < Py_SIZE(b); i++) { |
770 | 231M | PyObject *v = src[i]; |
771 | 231M | dest[i] = Py_NewRef(v); |
772 | 231M | } |
773 | 13.2M | Py_SET_SIZE(np, size); |
774 | 13.2M | return (PyObject *)np; |
775 | 13.2M | } |
776 | | |
777 | | static PyObject * |
778 | | list_concat(PyObject *aa, PyObject *bb) |
779 | 20.8M | { |
780 | 20.8M | if (!PyList_Check(bb)) { |
781 | 0 | PyErr_Format(PyExc_TypeError, |
782 | 0 | "can only concatenate list (not \"%.200s\") to list", |
783 | 0 | Py_TYPE(bb)->tp_name); |
784 | 0 | return NULL; |
785 | 0 | } |
786 | 20.8M | PyListObject *a = (PyListObject *)aa; |
787 | 20.8M | PyListObject *b = (PyListObject *)bb; |
788 | 20.8M | PyObject *ret; |
789 | 20.8M | Py_BEGIN_CRITICAL_SECTION2(a, b); |
790 | 20.8M | ret = list_concat_lock_held(a, b); |
791 | 20.8M | Py_END_CRITICAL_SECTION2(); |
792 | 20.8M | return ret; |
793 | 20.8M | } |
794 | | |
795 | | static PyObject * |
796 | | list_repeat_lock_held(PyListObject *a, Py_ssize_t n) |
797 | 19.2k | { |
798 | 19.2k | const Py_ssize_t input_size = Py_SIZE(a); |
799 | 19.2k | if (input_size == 0 || n <= 0) |
800 | 2.38k | return PyList_New(0); |
801 | 19.2k | assert(n > 0); |
802 | | |
803 | 16.8k | if (input_size > PY_SSIZE_T_MAX / n) |
804 | 0 | return PyErr_NoMemory(); |
805 | 16.8k | Py_ssize_t output_size = input_size * n; |
806 | | |
807 | 16.8k | PyListObject *np = (PyListObject *) list_new_prealloc(output_size); |
808 | 16.8k | if (np == NULL) |
809 | 0 | return NULL; |
810 | | |
811 | 16.8k | PyObject **dest = np->ob_item; |
812 | 16.8k | if (input_size == 1) { |
813 | 16.8k | PyObject *elem = a->ob_item[0]; |
814 | 16.8k | _Py_RefcntAdd(elem, n); |
815 | 16.8k | PyObject **dest_end = dest + output_size; |
816 | 17.6M | while (dest < dest_end) { |
817 | 17.5M | *dest++ = elem; |
818 | 17.5M | } |
819 | 16.8k | } |
820 | 0 | else { |
821 | 0 | PyObject **src = a->ob_item; |
822 | 0 | PyObject **src_end = src + input_size; |
823 | 0 | while (src < src_end) { |
824 | 0 | _Py_RefcntAdd(*src, n); |
825 | 0 | *dest++ = *src++; |
826 | 0 | } |
827 | | // This list is not yet visible to other threads, so atomic repeat |
828 | | // is not necessary even in Py_GIL_DISABLED builds. |
829 | 0 | _Py_memory_repeat((char *)np->ob_item, sizeof(PyObject *)*output_size, |
830 | 0 | sizeof(PyObject *)*input_size); |
831 | 0 | } |
832 | | |
833 | 16.8k | Py_SET_SIZE(np, output_size); |
834 | 16.8k | return (PyObject *) np; |
835 | 16.8k | } |
836 | | |
837 | | static PyObject * |
838 | | list_repeat(PyObject *aa, Py_ssize_t n) |
839 | 19.2k | { |
840 | 19.2k | PyObject *ret; |
841 | 19.2k | PyListObject *a = (PyListObject *)aa; |
842 | 19.2k | Py_BEGIN_CRITICAL_SECTION(a); |
843 | 19.2k | ret = list_repeat_lock_held(a, n); |
844 | 19.2k | Py_END_CRITICAL_SECTION(); |
845 | 19.2k | return ret; |
846 | 19.2k | } |
847 | | |
848 | | static void |
849 | | list_clear_impl(PyListObject *a, bool is_resize) |
850 | 16.2M | { |
851 | 16.2M | PyObject **items = a->ob_item; |
852 | 16.2M | if (items == NULL) { |
853 | 0 | return; |
854 | 0 | } |
855 | | |
856 | | /* Because XDECREF can recursively invoke operations on |
857 | | this list, we make it empty first. */ |
858 | 16.2M | Py_ssize_t i = Py_SIZE(a); |
859 | 16.2M | Py_SET_SIZE(a, 0); |
860 | 16.2M | FT_ATOMIC_STORE_PTR_RELEASE(a->ob_item, NULL); |
861 | 16.2M | a->allocated = 0; |
862 | 32.5M | while (--i >= 0) { |
863 | 16.3M | Py_XDECREF(items[i]); |
864 | 16.3M | } |
865 | | #ifdef Py_GIL_DISABLED |
866 | | if (is_resize) { |
867 | | ensure_shared_on_resize(a); |
868 | | } |
869 | | bool use_qsbr = is_resize && _PyObject_GC_IS_SHARED(a); |
870 | | #else |
871 | 16.2M | bool use_qsbr = false; |
872 | 16.2M | #endif |
873 | 16.2M | free_list_items(items, use_qsbr); |
874 | | // Note that there is no guarantee that the list is actually empty |
875 | | // at this point, because XDECREF may have populated it indirectly again! |
876 | 16.2M | } |
877 | | |
878 | | static void |
879 | | list_clear(PyListObject *a) |
880 | 16.2M | { |
881 | 16.2M | list_clear_impl(a, true); |
882 | 16.2M | } |
883 | | |
884 | | static int |
885 | | list_clear_slot(PyObject *self) |
886 | 0 | { |
887 | 0 | list_clear_impl((PyListObject *)self, false); |
888 | 0 | return 0; |
889 | 0 | } |
890 | | |
891 | | // Pointer-by-pointer memmove for PyObject** arrays that is safe |
892 | | // for shared lists in Py_GIL_DISABLED builds. |
893 | | static void |
894 | | ptr_wise_atomic_memmove(PyListObject *a, PyObject **dest, PyObject **src, Py_ssize_t n) |
895 | 15.4M | { |
896 | 15.4M | #ifndef Py_GIL_DISABLED |
897 | 15.4M | memmove(dest, src, n * sizeof(PyObject *)); |
898 | | #else |
899 | | _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(a); |
900 | | if (_Py_IsOwnedByCurrentThread((PyObject *)a) && !_PyObject_GC_IS_SHARED(a)) { |
901 | | // No other threads can read this list concurrently |
902 | | memmove(dest, src, n * sizeof(PyObject *)); |
903 | | return; |
904 | | } |
905 | | if (dest < src) { |
906 | | for (Py_ssize_t i = 0; i != n; i++) { |
907 | | _Py_atomic_store_ptr_release(&dest[i], src[i]); |
908 | | } |
909 | | } |
910 | | else { |
911 | | // copy backwards to avoid overwriting src before it's read |
912 | | for (Py_ssize_t i = n; i != 0; i--) { |
913 | | _Py_atomic_store_ptr_release(&dest[i - 1], src[i - 1]); |
914 | | } |
915 | | } |
916 | | #endif |
917 | 15.4M | } |
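The direction rule in the Py_GIL_DISABLED branch above matters because the source and destination ranges overlap: when dest sits after src, a forward element-by-element copy would read slots it has already overwritten, so the copy must run backwards. A standalone sketch with plain ints standing in for PyObject* slots (copy_elementwise is a made-up name, and the atomics are omitted):

    #include <stdio.h>

    static void copy_elementwise(int *dest, const int *src, int n)
    {
        if (dest < src) {
            for (int i = 0; i < n; i++) dest[i] = src[i];
        } else {
            /* copy backwards so src slots are read before being overwritten */
            for (int i = n; i > 0; i--) dest[i - 1] = src[i - 1];
        }
    }

    int main(void)
    {
        int a[6] = {10, 20, 30, 40, 50, 0};
        copy_elementwise(&a[2], &a[1], 4);       /* open a gap at index 2, like ins1() */
        for (int i = 0; i < 6; i++) printf("%d ", a[i]);   /* 10 20 20 30 40 50 */
        printf("\n");
        return 0;
    }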
918 | | |
919 | | /* a[ilow:ihigh] = v if v != NULL. |
920 | | * del a[ilow:ihigh] if v == NULL. |
921 | | * |
922 | | * Special speed gimmick: when v is NULL and ihigh - ilow <= 8, it's |
923 | | * guaranteed the call cannot fail. |
924 | | */ |
925 | | static int |
926 | | list_ass_slice_lock_held(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v) |
927 | 4.31M | { |
928 | | /* Because [X]DECREF can recursively invoke list operations on |
929 | | this list, we must postpone all [X]DECREF activity until |
930 | | after the list is back in its canonical shape. Therefore |
931 | | we must allocate an additional array, 'recycle', into which |
932 | | we temporarily copy the items that are deleted from the |
933 | | list. :-( */ |
934 | 4.31M | PyObject *recycle_on_stack[8]; |
935 | 4.31M | PyObject **recycle = recycle_on_stack; /* will allocate more if needed */ |
936 | 4.31M | PyObject **item; |
937 | 4.31M | PyObject **vitem = NULL; |
938 | 4.31M | PyObject *v_as_SF = NULL; /* PySequence_Fast(v) */ |
939 | 4.31M | Py_ssize_t n; /* # of elements in replacement list */ |
940 | 4.31M | Py_ssize_t norig; /* # of elements in list getting replaced */ |
941 | 4.31M | Py_ssize_t d; /* Change in size */ |
942 | 4.31M | Py_ssize_t k; |
943 | 4.31M | size_t s; |
944 | 4.31M | int result = -1; /* guilty until proved innocent */ |
945 | 4.31M | #define b ((PyListObject *)v) |
946 | 4.31M | if (v == NULL) |
947 | 4.04M | n = 0; |
948 | 274k | else { |
949 | 274k | v_as_SF = PySequence_Fast(v, "can only assign an iterable"); |
950 | 274k | if(v_as_SF == NULL) |
951 | 0 | goto Error; |
952 | 274k | n = PySequence_Fast_GET_SIZE(v_as_SF); |
953 | 274k | vitem = PySequence_Fast_ITEMS(v_as_SF); |
954 | 274k | } |
955 | 4.31M | if (ilow < 0) |
956 | 0 | ilow = 0; |
957 | 4.31M | else if (ilow > Py_SIZE(a)) |
958 | 0 | ilow = Py_SIZE(a); |
959 | | |
960 | 4.31M | if (ihigh < ilow) |
961 | 0 | ihigh = ilow; |
962 | 4.31M | else if (ihigh > Py_SIZE(a)) |
963 | 0 | ihigh = Py_SIZE(a); |
964 | | |
965 | 4.31M | norig = ihigh - ilow; |
966 | 4.31M | assert(norig >= 0); |
967 | 4.31M | d = n - norig; |
968 | 4.31M | if (Py_SIZE(a) + d == 0) { |
969 | 601k | Py_XDECREF(v_as_SF); |
970 | 601k | list_clear(a); |
971 | 601k | return 0; |
972 | 601k | } |
973 | 3.71M | item = a->ob_item; |
974 | | /* recycle the items that we are about to remove */ |
975 | 3.71M | s = norig * sizeof(PyObject *); |
976 | | /* If norig == 0, item might be NULL, in which case we may not memcpy from it. */ |
977 | 3.71M | if (s) { |
978 | 3.47M | if (s > sizeof(recycle_on_stack)) { |
979 | 148 | recycle = (PyObject **)PyMem_Malloc(s); |
980 | 148 | if (recycle == NULL) { |
981 | 0 | PyErr_NoMemory(); |
982 | 0 | goto Error; |
983 | 0 | } |
984 | 148 | } |
985 | 3.47M | memcpy(recycle, &item[ilow], s); |
986 | 3.47M | } |
987 | | |
988 | 3.71M | if (d < 0) { /* Delete -d items */ |
989 | 3.46M | Py_ssize_t tail = Py_SIZE(a) - ihigh; |
990 | 3.46M | ptr_wise_atomic_memmove(a, &item[ihigh+d], &item[ihigh], tail); |
991 | 3.46M | (void)list_resize(a, Py_SIZE(a) + d); // NB: shrinking a list can't fail |
992 | 3.46M | item = a->ob_item; |
993 | 3.46M | } |
994 | 249k | else if (d > 0) { /* Insert d items */ |
995 | 249k | k = Py_SIZE(a); |
996 | 249k | if (list_resize(a, k+d) < 0) |
997 | 0 | goto Error; |
998 | 249k | item = a->ob_item; |
999 | 249k | ptr_wise_atomic_memmove(a, &item[ihigh+d], &item[ihigh], k - ihigh); |
1000 | 249k | } |
1001 | 9.09M | for (k = 0; k < n; k++, ilow++) { |
1002 | 5.37M | PyObject *w = vitem[k]; |
1003 | 5.37M | FT_ATOMIC_STORE_PTR_RELEASE(item[ilow], Py_XNewRef(w)); |
1004 | 5.37M | } |
1005 | 7.20M | for (k = norig - 1; k >= 0; --k) |
1006 | 3.48M | Py_XDECREF(recycle[k]); |
1007 | 3.71M | result = 0; |
1008 | 3.71M | Error: |
1009 | 3.71M | if (recycle != recycle_on_stack) |
1010 | 148 | PyMem_Free(recycle); |
1011 | 3.71M | Py_XDECREF(v_as_SF); |
1012 | 3.71M | return result; |
1013 | 3.71M | #undef b |
1014 | 3.71M | } |
1015 | | |
1016 | | static int |
1017 | | list_ass_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v) |
1018 | 4.03M | { |
1019 | 4.03M | int ret; |
1020 | 4.03M | if (a == (PyListObject *)v) { |
1021 | 0 | Py_BEGIN_CRITICAL_SECTION(a); |
1022 | 0 | Py_ssize_t n = PyList_GET_SIZE(a); |
1023 | 0 | PyObject *copy = list_slice_lock_held(a, 0, n); |
1024 | 0 | if (copy == NULL) { |
1025 | 0 | ret = -1; |
1026 | 0 | } |
1027 | 0 | else { |
1028 | 0 | ret = list_ass_slice_lock_held(a, ilow, ihigh, copy); |
1029 | 0 | Py_DECREF(copy); |
1030 | 0 | } |
1031 | 0 | Py_END_CRITICAL_SECTION(); |
1032 | 0 | } |
1033 | 4.03M | else if (v != NULL && PyList_CheckExact(v)) { |
1034 | 262 | Py_BEGIN_CRITICAL_SECTION2(a, v); |
1035 | 262 | ret = list_ass_slice_lock_held(a, ilow, ihigh, v); |
1036 | 262 | Py_END_CRITICAL_SECTION2(); |
1037 | 262 | } |
1038 | 4.03M | else { |
1039 | 4.03M | Py_BEGIN_CRITICAL_SECTION(a); |
1040 | 4.03M | ret = list_ass_slice_lock_held(a, ilow, ihigh, v); |
1041 | 4.03M | Py_END_CRITICAL_SECTION(); |
1042 | 4.03M | } |
1043 | 4.03M | return ret; |
1044 | 4.03M | } |
1045 | | |
1046 | | int |
1047 | | PyList_SetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v) |
1048 | 4.03M | { |
1049 | 4.03M | if (!PyList_Check(a)) { |
1050 | 0 | PyErr_BadInternalCall(); |
1051 | 0 | return -1; |
1052 | 0 | } |
1053 | 4.03M | return list_ass_slice((PyListObject *)a, ilow, ihigh, v); |
1054 | 4.03M | } |
1055 | | |
1056 | | static int |
1057 | | list_inplace_repeat_lock_held(PyListObject *self, Py_ssize_t n) |
1058 | 0 | { |
1059 | 0 | Py_ssize_t input_size = PyList_GET_SIZE(self); |
1060 | 0 | if (input_size == 0 || n == 1) { |
1061 | 0 | return 0; |
1062 | 0 | } |
1063 | | |
1064 | 0 | if (n < 1) { |
1065 | 0 | list_clear(self); |
1066 | 0 | return 0; |
1067 | 0 | } |
1068 | | |
1069 | 0 | if (input_size > PY_SSIZE_T_MAX / n) { |
1070 | 0 | PyErr_NoMemory(); |
1071 | 0 | return -1; |
1072 | 0 | } |
1073 | 0 | Py_ssize_t output_size = input_size * n; |
1074 | |
1075 | 0 | if (list_resize(self, output_size) < 0) { |
1076 | 0 | return -1; |
1077 | 0 | } |
1078 | | |
1079 | 0 | PyObject **items = self->ob_item; |
1080 | 0 | for (Py_ssize_t j = 0; j < input_size; j++) { |
1081 | 0 | _Py_RefcntAdd(items[j], n-1); |
1082 | 0 | } |
1083 | 0 | #ifndef Py_GIL_DISABLED |
1084 | 0 | _Py_memory_repeat((char *)items, sizeof(PyObject *)*output_size, |
1085 | 0 | sizeof(PyObject *)*input_size); |
1086 | | #else |
1087 | | Py_ssize_t copied = input_size; |
1088 | | while (copied < output_size) { |
1089 | | Py_ssize_t items_to_copy = Py_MIN(copied, output_size - copied); |
1090 | | ptr_wise_atomic_memmove(self, items + copied, items, items_to_copy); |
1091 | | copied += items_to_copy; |
1092 | | } |
1093 | | #endif |
1094 | 0 | return 0; |
1095 | 0 | } |
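The Py_GIL_DISABLED branch above fills the output by doubling: once one full copy of the input sits at the front of the buffer, each pass copies as much as has already been written, so the repetition completes in a logarithmic number of block copies rather than n - 1 element-wise passes. A standalone sketch of that doubling idea using chars instead of PyObject* slots (the chunks never overlap, so plain memcpy is enough here):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char src[] = "abc";               /* input_size = 3 */
        enum { INPUT = 3, REPEAT = 5, OUTPUT = INPUT * REPEAT };
        char buf[OUTPUT + 1];
        memcpy(buf, src, INPUT);                /* first copy of the input */
        size_t copied = INPUT;
        while (copied < OUTPUT) {
            size_t chunk = copied < OUTPUT - copied ? copied : OUTPUT - copied;
            memcpy(buf + copied, buf, chunk);   /* double what is already there */
            copied += chunk;
        }
        buf[OUTPUT] = '\0';
        printf("%s\n", buf);                    /* abcabcabcabcabc */
        return 0;
    }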
1096 | | |
1097 | | static PyObject * |
1098 | | list_inplace_repeat(PyObject *_self, Py_ssize_t n) |
1099 | 0 | { |
1100 | 0 | PyObject *ret; |
1101 | 0 | PyListObject *self = (PyListObject *) _self; |
1102 | 0 | Py_BEGIN_CRITICAL_SECTION(self); |
1103 | 0 | if (list_inplace_repeat_lock_held(self, n) < 0) { |
1104 | 0 | ret = NULL; |
1105 | 0 | } |
1106 | 0 | else { |
1107 | 0 | ret = Py_NewRef(self); |
1108 | 0 | } |
1109 | 0 | Py_END_CRITICAL_SECTION(); |
1110 | 0 | return ret; |
1111 | 0 | } |
1112 | | |
1113 | | static int |
1114 | | list_ass_item_lock_held(PyListObject *a, Py_ssize_t i, PyObject *v) |
1115 | 2.17M | { |
1116 | 2.17M | if (!valid_index(i, Py_SIZE(a))) { |
1117 | 0 | PyErr_SetString(PyExc_IndexError, |
1118 | 0 | "list assignment index out of range"); |
1119 | 0 | return -1; |
1120 | 0 | } |
1121 | 2.17M | PyObject *tmp = a->ob_item[i]; |
1122 | 2.17M | if (v == NULL) { |
1123 | 14.8k | Py_ssize_t size = Py_SIZE(a); |
1124 | 4.21M | for (Py_ssize_t idx = i; idx < size - 1; idx++) { |
1125 | 4.19M | FT_ATOMIC_STORE_PTR_RELEASE(a->ob_item[idx], a->ob_item[idx + 1]); |
1126 | 4.19M | } |
1127 | 14.8k | Py_SET_SIZE(a, size - 1); |
1128 | 14.8k | } |
1129 | 2.15M | else { |
1130 | 2.15M | FT_ATOMIC_STORE_PTR_RELEASE(a->ob_item[i], Py_NewRef(v)); |
1131 | 2.15M | } |
1132 | 2.17M | Py_DECREF(tmp); |
1133 | 2.17M | return 0; |
1134 | 2.17M | } |
1135 | | |
1136 | | static int |
1137 | | list_ass_item(PyObject *aa, Py_ssize_t i, PyObject *v) |
1138 | 8.11k | { |
1139 | 8.11k | int ret; |
1140 | 8.11k | PyListObject *a = (PyListObject *)aa; |
1141 | 8.11k | Py_BEGIN_CRITICAL_SECTION(a); |
1142 | 8.11k | ret = list_ass_item_lock_held(a, i, v); |
1143 | 8.11k | Py_END_CRITICAL_SECTION(); |
1144 | 8.11k | return ret; |
1145 | 8.11k | } |
1146 | | |
1147 | | /*[clinic input] |
1148 | | @critical_section |
1149 | | list.insert |
1150 | | |
1151 | | index: Py_ssize_t |
1152 | | object: object |
1153 | | / |
1154 | | |
1155 | | Insert object before index. |
1156 | | [clinic start generated code]*/ |
1157 | | |
1158 | | static PyObject * |
1159 | | list_insert_impl(PyListObject *self, Py_ssize_t index, PyObject *object) |
1160 | | /*[clinic end generated code: output=7f35e32f60c8cb78 input=b1987ca998a4ae2d]*/ |
1161 | 1.39k | { |
1162 | 1.39k | if (ins1(self, index, object) == 0) { |
1163 | 1.39k | Py_RETURN_NONE; |
1164 | 1.39k | } |
1165 | 0 | return NULL; |
1166 | 1.39k | } |
1167 | | |
1168 | | /*[clinic input] |
1169 | | @critical_section |
1170 | | list.clear as py_list_clear |
1171 | | |
1172 | | Remove all items from list. |
1173 | | [clinic start generated code]*/ |
1174 | | |
1175 | | static PyObject * |
1176 | | py_list_clear_impl(PyListObject *self) |
1177 | | /*[clinic end generated code: output=83726743807e3518 input=e285b7f09051a9ba]*/ |
1178 | 217 | { |
1179 | 217 | list_clear(self); |
1180 | 217 | Py_RETURN_NONE; |
1181 | 217 | } |
1182 | | |
1183 | | /*[clinic input] |
1184 | | @critical_section |
1185 | | list.copy |
1186 | | |
1187 | | Return a shallow copy of the list. |
1188 | | [clinic start generated code]*/ |
1189 | | |
1190 | | static PyObject * |
1191 | | list_copy_impl(PyListObject *self) |
1192 | | /*[clinic end generated code: output=ec6b72d6209d418e input=81c54b0c7bb4f73d]*/ |
1193 | 0 | { |
1194 | 0 | return list_slice_lock_held(self, 0, Py_SIZE(self)); |
1195 | 0 | } |
1196 | | |
1197 | | /*[clinic input] |
1198 | | @critical_section |
1199 | | list.append |
1200 | | |
1201 | | object: object |
1202 | | / |
1203 | | |
1204 | | Append object to the end of the list. |
1205 | | [clinic start generated code]*/ |
1206 | | |
1207 | | static PyObject * |
1208 | | list_append_impl(PyListObject *self, PyObject *object) |
1209 | | /*[clinic end generated code: output=78423561d92ed405 input=122b0853de54004f]*/ |
1210 | 119M | { |
1211 | 119M | if (_PyList_AppendTakeRef(self, Py_NewRef(object)) < 0) { |
1212 | 0 | return NULL; |
1213 | 0 | } |
1214 | 119M | Py_RETURN_NONE; |
1215 | 119M | } |
1216 | | |
1217 | | static int |
1218 | | list_extend_fast(PyListObject *self, PyObject *iterable) |
1219 | 22.7M | { |
1220 | 22.7M | Py_ssize_t n = PySequence_Fast_GET_SIZE(iterable); |
1221 | 22.7M | if (n == 0) { |
1222 | | /* short circuit when iterable is empty */ |
1223 | 9.18M | return 0; |
1224 | 9.18M | } |
1225 | | |
1226 | 13.5M | Py_ssize_t m = Py_SIZE(self); |
1227 | | // It should not be possible to allocate a list large enough to cause |
1228 | | // an overflow on any relevant platform. |
1229 | 13.5M | assert(m < PY_SSIZE_T_MAX - n); |
1230 | 13.5M | if (self->ob_item == NULL) { |
1231 | 1.37M | if (list_preallocate_exact(self, n) < 0) { |
1232 | 0 | return -1; |
1233 | 0 | } |
1234 | 1.37M | Py_SET_SIZE(self, n); |
1235 | 1.37M | } |
1236 | 12.2M | else if (list_resize(self, m + n) < 0) { |
1237 | 0 | return -1; |
1238 | 0 | } |
1239 | | |
1240 | | // note that we may still have self == iterable here for the |
1241 | | // situation a.extend(a), but the following code works |
1242 | | // in that case too. Just make sure to resize self |
1243 | | // before calling PySequence_Fast_ITEMS. |
1244 | | // |
1245 | | // populate the end of self with iterable's items. |
1246 | 13.5M | PyObject **src = PySequence_Fast_ITEMS(iterable); |
1247 | 13.5M | PyObject **dest = self->ob_item + m; |
1248 | 62.6M | for (Py_ssize_t i = 0; i < n; i++) { |
1249 | 49.0M | PyObject *o = src[i]; |
1250 | 49.0M | FT_ATOMIC_STORE_PTR_RELEASE(dest[i], Py_NewRef(o)); |
1251 | 49.0M | } |
1252 | 13.5M | return 0; |
1253 | 13.5M | } |
1254 | | |
1255 | | static int |
1256 | | list_extend_iter_lock_held(PyListObject *self, PyObject *iterable) |
1257 | 5.94M | { |
1258 | 5.94M | PyObject *it = PyObject_GetIter(iterable); |
1259 | 5.94M | if (it == NULL) { |
1260 | 0 | return -1; |
1261 | 0 | } |
1262 | 5.94M | PyObject *(*iternext)(PyObject *) = *Py_TYPE(it)->tp_iternext; |
1263 | | |
1264 | | /* Guess a result list size. */ |
1265 | 5.94M | Py_ssize_t n = PyObject_LengthHint(iterable, 8); |
1266 | 5.94M | if (n < 0) { |
1267 | 0 | Py_DECREF(it); |
1268 | 0 | return -1; |
1269 | 0 | } |
1270 | | |
1271 | 5.94M | Py_ssize_t m = Py_SIZE(self); |
1272 | 5.94M | if (m > PY_SSIZE_T_MAX - n) { |
1273 | | /* m + n overflowed; on the chance that n lied, and there really |
1274 | | * is enough room, ignore it. If n was telling the truth, we'll |
1275 | | * eventually run out of memory during the loop. |
1276 | | */ |
1277 | 0 | } |
1278 | 5.94M | else if (self->ob_item == NULL) { |
1279 | 5.63M | if (n && list_preallocate_exact(self, n) < 0) |
1280 | 0 | goto error; |
1281 | 5.63M | } |
1282 | 315k | else { |
1283 | | /* Make room. */ |
1284 | 315k | if (list_resize(self, m + n) < 0) { |
1285 | 0 | goto error; |
1286 | 0 | } |
1287 | | |
1288 | | /* Make the list sane again. */ |
1289 | 315k | Py_SET_SIZE(self, m); |
1290 | 315k | } |
1291 | | |
1292 | | /* Run iterator to exhaustion. */ |
1293 | 73.2M | for (;;) { |
1294 | 73.2M | PyObject *item = iternext(it); |
1295 | 73.2M | if (item == NULL) { |
1296 | 5.94M | if (PyErr_Occurred()) { |
1297 | 674 | if (PyErr_ExceptionMatches(PyExc_StopIteration)) |
1298 | 0 | PyErr_Clear(); |
1299 | 674 | else |
1300 | 674 | goto error; |
1301 | 674 | } |
1302 | 5.94M | break; |
1303 | 5.94M | } |
1304 | | |
1305 | 67.3M | if (Py_SIZE(self) < self->allocated) { |
1306 | 66.1M | Py_ssize_t len = Py_SIZE(self); |
1307 | 66.1M | FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item[len], item); // steals item ref |
1308 | 66.1M | Py_SET_SIZE(self, len + 1); |
1309 | 66.1M | } |
1310 | 1.15M | else { |
1311 | 1.15M | if (_PyList_AppendTakeRef(self, item) < 0) |
1312 | 0 | goto error; |
1313 | 1.15M | } |
1314 | 67.3M | } |
1315 | | |
1316 | | /* Cut back result list if initial guess was too large. */ |
1317 | 5.94M | if (Py_SIZE(self) < self->allocated) { |
1318 | 4.34M | if (list_resize(self, Py_SIZE(self)) < 0) |
1319 | 0 | goto error; |
1320 | 4.34M | } |
1321 | | |
1322 | 5.94M | Py_DECREF(it); |
1323 | 5.94M | return 0; |
1324 | | |
1325 | 674 | error: |
1326 | 674 | Py_DECREF(it); |
1327 | 674 | return -1; |
1328 | 5.94M | } |
1329 | | |
1330 | | static int |
1331 | | list_extend_lock_held(PyListObject *self, PyObject *iterable) |
1332 | 22.7M | { |
1333 | 22.7M | PyObject *seq = PySequence_Fast(iterable, "argument must be iterable"); |
1334 | 22.7M | if (!seq) { |
1335 | 0 | return -1; |
1336 | 0 | } |
1337 | | |
1338 | 22.7M | int res = list_extend_fast(self, seq); |
1339 | 22.7M | Py_DECREF(seq); |
1340 | 22.7M | return res; |
1341 | 22.7M | } |
1342 | | |
1343 | | static int |
1344 | | list_extend_set(PyListObject *self, PySetObject *other) |
1345 | 20.4k | { |
1346 | 20.4k | Py_ssize_t m = Py_SIZE(self); |
1347 | 20.4k | Py_ssize_t n = PySet_GET_SIZE(other); |
1348 | 20.4k | Py_ssize_t r = m + n; |
1349 | 20.4k | if (r == 0) { |
1350 | 948 | return 0; |
1351 | 948 | } |
1352 | 19.4k | if (list_resize(self, r) < 0) { |
1353 | 0 | return -1; |
1354 | 0 | } |
1355 | | |
1356 | 19.4k | assert(self->ob_item != NULL); |
1357 | | /* populate the end of self with iterable's items */ |
1358 | 19.4k | Py_ssize_t setpos = 0; |
1359 | 19.4k | Py_hash_t hash; |
1360 | 19.4k | PyObject *key; |
1361 | 19.4k | PyObject **dest = self->ob_item + m; |
1362 | 114k | while (_PySet_NextEntryRef((PyObject *)other, &setpos, &key, &hash)) { |
1363 | 95.3k | FT_ATOMIC_STORE_PTR_RELEASE(*dest, key); |
1364 | 95.3k | dest++; |
1365 | 95.3k | } |
1366 | 19.4k | Py_SET_SIZE(self, r); |
1367 | 19.4k | return 0; |
1368 | 19.4k | } |
1369 | | |
1370 | | static int |
1371 | | list_extend_dict(PyListObject *self, PyDictObject *dict, int which_item) |
1372 | 792k | { |
1373 | | // which_item: 0 for keys and 1 for values |
1374 | 792k | Py_ssize_t m = Py_SIZE(self); |
1375 | 792k | Py_ssize_t n = PyDict_GET_SIZE(dict); |
1376 | 792k | Py_ssize_t r = m + n; |
1377 | 792k | if (r == 0) { |
1378 | 0 | return 0; |
1379 | 0 | } |
1380 | 792k | if (list_resize(self, r) < 0) { |
1381 | 0 | return -1; |
1382 | 0 | } |
1383 | | |
1384 | 792k | assert(self->ob_item != NULL); |
1385 | 792k | PyObject **dest = self->ob_item + m; |
1386 | 792k | Py_ssize_t pos = 0; |
1387 | 792k | PyObject *keyvalue[2]; |
1388 | 2.54M | while (_PyDict_Next((PyObject *)dict, &pos, &keyvalue[0], &keyvalue[1], NULL)) { |
1389 | 1.75M | PyObject *obj = keyvalue[which_item]; |
1390 | 1.75M | Py_INCREF(obj); |
1391 | 1.75M | FT_ATOMIC_STORE_PTR_RELEASE(*dest, obj); |
1392 | 1.75M | dest++; |
1393 | 1.75M | } |
1394 | | |
1395 | 792k | Py_SET_SIZE(self, r); |
1396 | 792k | return 0; |
1397 | 792k | } |
1398 | | |
1399 | | static int |
1400 | | list_extend_dictitems(PyListObject *self, PyDictObject *dict) |
1401 | 0 | { |
1402 | 0 | Py_ssize_t m = Py_SIZE(self); |
1403 | 0 | Py_ssize_t n = PyDict_GET_SIZE(dict); |
1404 | 0 | Py_ssize_t r = m + n; |
1405 | 0 | if (r == 0) { |
1406 | 0 | return 0; |
1407 | 0 | } |
1408 | 0 | if (list_resize(self, r) < 0) { |
1409 | 0 | return -1; |
1410 | 0 | } |
1411 | | |
1412 | 0 | assert(self->ob_item != NULL); |
1413 | 0 | PyObject **dest = self->ob_item + m; |
1414 | 0 | Py_ssize_t pos = 0; |
1415 | 0 | Py_ssize_t i = 0; |
1416 | 0 | PyObject *key_value[2]; |
1417 | 0 | while (_PyDict_Next((PyObject *)dict, &pos, &key_value[0], &key_value[1], NULL)) { |
1418 | 0 | PyObject *item = PyTuple_FromArray(key_value, 2); |
1419 | 0 | if (item == NULL) { |
1420 | 0 | Py_SET_SIZE(self, m + i); |
1421 | 0 | return -1; |
1422 | 0 | } |
1423 | 0 | FT_ATOMIC_STORE_PTR_RELEASE(*dest, item); |
1424 | 0 | dest++; |
1425 | 0 | i++; |
1426 | 0 | } |
1427 | | |
1428 | 0 | Py_SET_SIZE(self, r); |
1429 | 0 | return 0; |
1430 | 0 | } |
1431 | | |
1432 | | static int |
1433 | | _list_extend(PyListObject *self, PyObject *iterable) |
1434 | 29.5M | { |
1435 | | // Special case: |
1436 | | // lists and tuples which can use PySequence_Fast ops |
1437 | 29.5M | int res = -1; |
1438 | 29.5M | if ((PyObject *)self == iterable) { |
1439 | 0 | Py_BEGIN_CRITICAL_SECTION(self); |
1440 | 0 | res = list_inplace_repeat_lock_held(self, 2); |
1441 | 0 | Py_END_CRITICAL_SECTION(); |
1442 | 0 | } |
1443 | 29.5M | else if (PyList_CheckExact(iterable)) { |
1444 | 10.6M | Py_BEGIN_CRITICAL_SECTION2(self, iterable); |
1445 | 10.6M | res = list_extend_lock_held(self, iterable); |
1446 | 10.6M | Py_END_CRITICAL_SECTION2(); |
1447 | 10.6M | } |
1448 | 18.9M | else if (PyTuple_CheckExact(iterable)) { |
1449 | 12.1M | Py_BEGIN_CRITICAL_SECTION(self); |
1450 | 12.1M | res = list_extend_lock_held(self, iterable); |
1451 | 12.1M | Py_END_CRITICAL_SECTION(); |
1452 | 12.1M | } |
1453 | 6.75M | else if (PyAnySet_CheckExact(iterable)) { |
1454 | 20.4k | Py_BEGIN_CRITICAL_SECTION2(self, iterable); |
1455 | 20.4k | res = list_extend_set(self, (PySetObject *)iterable); |
1456 | 20.4k | Py_END_CRITICAL_SECTION2(); |
1457 | 20.4k | } |
1458 | 6.73M | else if (PyDict_CheckExact(iterable)) { |
1459 | 792k | Py_BEGIN_CRITICAL_SECTION2(self, iterable); |
1460 | 792k | res = list_extend_dict(self, (PyDictObject *)iterable, 0 /*keys*/); |
1461 | 792k | Py_END_CRITICAL_SECTION2(); |
1462 | 792k | } |
1463 | 5.94M | else if (Py_IS_TYPE(iterable, &PyDictKeys_Type)) { |
1464 | 0 | PyDictObject *dict = ((_PyDictViewObject *)iterable)->dv_dict; |
1465 | 0 | Py_BEGIN_CRITICAL_SECTION2(self, dict); |
1466 | 0 | res = list_extend_dict(self, dict, 0 /*keys*/); |
1467 | 0 | Py_END_CRITICAL_SECTION2(); |
1468 | 0 | } |
1469 | 5.94M | else if (Py_IS_TYPE(iterable, &PyDictValues_Type)) { |
1470 | 4 | PyDictObject *dict = ((_PyDictViewObject *)iterable)->dv_dict; |
1471 | 4 | Py_BEGIN_CRITICAL_SECTION2(self, dict); |
1472 | 4 | res = list_extend_dict(self, dict, 1 /*values*/); |
1473 | 4 | Py_END_CRITICAL_SECTION2(); |
1474 | 4 | } |
1475 | 5.94M | else if (Py_IS_TYPE(iterable, &PyDictItems_Type)) { |
1476 | 0 | PyDictObject *dict = ((_PyDictViewObject *)iterable)->dv_dict; |
1477 | 0 | Py_BEGIN_CRITICAL_SECTION2(self, dict); |
1478 | 0 | res = list_extend_dictitems(self, dict); |
1479 | 0 | Py_END_CRITICAL_SECTION2(); |
1480 | 0 | } |
1481 | 5.94M | else { |
1482 | 5.94M | Py_BEGIN_CRITICAL_SECTION(self); |
1483 | 5.94M | res = list_extend_iter_lock_held(self, iterable); |
1484 | 5.94M | Py_END_CRITICAL_SECTION(); |
1485 | 5.94M | } |
1486 | 29.5M | return res; |
1487 | 29.5M | } |
1488 | | |
1489 | | /*[clinic input] |
1490 | | list.extend as list_extend |
1491 | | |
1492 | | iterable: object |
1493 | | / |
1494 | | |
1495 | | Extend list by appending elements from the iterable. |
1496 | | [clinic start generated code]*/ |
1497 | | |
1498 | | static PyObject * |
1499 | | list_extend_impl(PyListObject *self, PyObject *iterable) |
1500 | | /*[clinic end generated code: output=b0eba9e0b186d5ce input=979da7597a515791]*/ |
1501 | 18.8M | { |
1502 | 18.8M | if (_list_extend(self, iterable) < 0) { |
1503 | 674 | return NULL; |
1504 | 674 | } |
1505 | 18.8M | Py_RETURN_NONE; |
1506 | 18.8M | } |
1507 | | |
1508 | | PyObject * |
1509 | | _PyList_Extend(PyListObject *self, PyObject *iterable) |
1510 | 17.6M | { |
1511 | 17.6M | return list_extend((PyObject*)self, iterable); |
1512 | 17.6M | } |
1513 | | |
1514 | | int |
1515 | | PyList_Extend(PyObject *self, PyObject *iterable) |
1516 | 0 | { |
1517 | 0 | if (!PyList_Check(self)) { |
1518 | 0 | PyErr_BadInternalCall(); |
1519 | 0 | return -1; |
1520 | 0 | } |
1521 | 0 | return _list_extend((PyListObject*)self, iterable); |
1522 | 0 | } |
1523 | | |
1524 | | |
1525 | | int |
1526 | | PyList_Clear(PyObject *self) |
1527 | 0 | { |
1528 | 0 | if (!PyList_Check(self)) { |
1529 | 0 | PyErr_BadInternalCall(); |
1530 | 0 | return -1; |
1531 | 0 | } |
1532 | 0 | Py_BEGIN_CRITICAL_SECTION(self); |
1533 | 0 | list_clear((PyListObject*)self); |
1534 | 0 | Py_END_CRITICAL_SECTION(); |
1535 | 0 | return 0; |
1536 | 0 | } |
1537 | | |
1538 | | |
1539 | | static PyObject * |
1540 | | list_inplace_concat(PyObject *_self, PyObject *other) |
1541 | 198k | { |
1542 | 198k | PyListObject *self = (PyListObject *)_self; |
1543 | 198k | if (_list_extend(self, other) < 0) { |
1544 | 0 | return NULL; |
1545 | 0 | } |
1546 | 198k | return Py_NewRef(self); |
1547 | 198k | } |
1548 | | |
1549 | | /*[clinic input] |
1550 | | @critical_section |
1551 | | list.pop |
1552 | | |
1553 | | index: Py_ssize_t = -1 |
1554 | | / |
1555 | | |
1556 | | Remove and return item at index (default last). |
1557 | | |
1558 | | Raises IndexError if list is empty or index is out of range. |
1559 | | [clinic start generated code]*/ |
1560 | | |
1561 | | static PyObject * |
1562 | | list_pop_impl(PyListObject *self, Py_ssize_t index) |
1563 | | /*[clinic end generated code: output=6bd69dcb3f17eca8 input=c269141068ae4b8f]*/ |
1564 | 38.5M | { |
1565 | 38.5M | PyObject *v; |
1566 | | |
1567 | 38.5M | if (Py_SIZE(self) == 0) { |
1568 | | /* Special-case most common failure cause */ |
1569 | 0 | PyErr_SetString(PyExc_IndexError, "pop from empty list"); |
1570 | 0 | return NULL; |
1571 | 0 | } |
1572 | 38.5M | if (index < 0) |
1573 | 19.6M | index += Py_SIZE(self); |
1574 | 38.5M | if (!valid_index(index, Py_SIZE(self))) { |
1575 | 0 | PyErr_SetString(PyExc_IndexError, "pop index out of range"); |
1576 | 0 | return NULL; |
1577 | 0 | } |
1578 | | |
1579 | 38.5M | PyObject **items = self->ob_item; |
1580 | 38.5M | v = items[index]; |
1581 | 38.5M | if (Py_SIZE(self) == 1) { |
1582 | 15.6M | Py_INCREF(v); |
1583 | 15.6M | list_clear(self); |
1584 | 15.6M | return v; |
1585 | 15.6M | } |
1586 | 22.9M | Py_ssize_t size_after_pop = Py_SIZE(self) - 1; |
1587 | 22.9M | if (index < size_after_pop) { |
1588 | 11.7M | ptr_wise_atomic_memmove(self, &items[index], &items[index+1], |
1589 | 11.7M | size_after_pop - index); |
1590 | 11.7M | } |
1591 | 22.9M | list_resize(self, size_after_pop); // NB: shrinking a list can't fail |
1592 | 22.9M | return v; |
1593 | 38.5M | } |
1594 | | |
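/* Standalone sketch (not part of listobject.c) of the "pop from the middle"
 * step above: closing the gap is a single memmove of the tail, followed by
 * shrinking the logical size.  Plain C, no CPython API; the demo_* names are
 * illustrative only. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void *
demo_pop(void **items, size_t *size, size_t index)
{
    void *v = items[index];
    size_t size_after_pop = *size - 1;
    if (index < size_after_pop) {
        /* shift items[index+1 .. size-1] left by one slot */
        memmove(&items[index], &items[index + 1],
                (size_after_pop - index) * sizeof(void *));
    }
    *size = size_after_pop;
    return v;
}

int main(void)
{
    void *words[] = {"a", "b", "c", "d"};
    size_t n = 4;
    void *popped = demo_pop(words, &n, 1);       /* pop "b" */
    printf("popped %s, remaining:", (char *)popped);
    for (size_t i = 0; i < n; i++) {
        printf(" %s", (char *)words[i]);
    }
    printf("\n");                                /* popped b, remaining: a c d */
    return 0;
}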
1595 | | /* Reverse a slice of a list in place, from lo up to (exclusive) hi. */ |
1596 | | static void |
1597 | | reverse_slice(PyObject **lo, PyObject **hi) |
1598 | 135k | { |
1599 | 135k | assert(lo && hi); |
1600 | | |
1601 | 135k | --hi; |
1602 | 448k | while (lo < hi) { |
1603 | 312k | PyObject *t = *lo; |
1604 | 312k | FT_ATOMIC_STORE_PTR_RELEASE(*lo, *hi); |
1605 | 312k | FT_ATOMIC_STORE_PTR_RELEASE(*hi, t); |
1606 | 312k | ++lo; |
1607 | 312k | --hi; |
1608 | 312k | } |
1609 | 135k | } |
1610 | | |
1611 | | /* Lots of code for an adaptive, stable, natural mergesort. There are many |
1612 | | * pieces to this algorithm; read listsort.txt for overviews and details. |
1613 | | */ |
1614 | | |
1615 | | /* A sortslice contains a pointer to an array of keys and a pointer to |
1616 | | * an array of corresponding values. In other words, keys[i] |
1617 | | * corresponds with values[i]. If values == NULL, then the keys are |
1618 | | * also the values. |
1619 | | * |
1620 | | * Several convenience routines are provided here, so that keys and |
1621 | | * values are always moved in sync. |
1622 | | */ |
1623 | | |
1624 | | typedef struct { |
1625 | | PyObject **keys; |
1626 | | PyObject **values; |
1627 | | } sortslice; |
1628 | | |
1629 | | Py_LOCAL_INLINE(void) |
1630 | | sortslice_copy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j) |
1631 | 36.6k | { |
1632 | 36.6k | s1->keys[i] = s2->keys[j]; |
1633 | 36.6k | if (s1->values != NULL) |
1634 | 29.5k | s1->values[i] = s2->values[j]; |
1635 | 36.6k | } |
1636 | | |
1637 | | Py_LOCAL_INLINE(void) |
1638 | | sortslice_copy_incr(sortslice *dst, sortslice *src) |
1639 | 1.10M | { |
1640 | 1.10M | *dst->keys++ = *src->keys++; |
1641 | 1.10M | if (dst->values != NULL) |
1642 | 357k | *dst->values++ = *src->values++; |
1643 | 1.10M | } |
1644 | | |
1645 | | Py_LOCAL_INLINE(void) |
1646 | | sortslice_copy_decr(sortslice *dst, sortslice *src) |
1647 | 423k | { |
1648 | 423k | *dst->keys-- = *src->keys--; |
1649 | 423k | if (dst->values != NULL) |
1650 | 346k | *dst->values-- = *src->values--; |
1651 | 423k | } |
1652 | | |
1653 | | |
1654 | | Py_LOCAL_INLINE(void) |
1655 | | sortslice_memcpy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j, |
1656 | | Py_ssize_t n) |
1657 | 205k | { |
1658 | 205k | memcpy(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n); |
1659 | 205k | if (s1->values != NULL) |
1660 | 155k | memcpy(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n); |
1661 | 205k | } |
1662 | | |
1663 | | Py_LOCAL_INLINE(void) |
1664 | | sortslice_memmove(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j, |
1665 | | Py_ssize_t n) |
1666 | 148k | { |
1667 | 148k | memmove(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n); |
1668 | 148k | if (s1->values != NULL) |
1669 | 106k | memmove(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n); |
1670 | 148k | } |
1671 | | |
1672 | | Py_LOCAL_INLINE(void) |
1673 | | sortslice_advance(sortslice *slice, Py_ssize_t n) |
1674 | 861k | { |
1675 | 861k | slice->keys += n; |
1676 | 861k | if (slice->values != NULL) |
1677 | 597k | slice->values += n; |
1678 | 861k | } |
1679 | | |
1680 | | /* Comparison function: ms->key_compare, which is set at run-time in |
1681 | | * listsort_impl to optimize for various special cases. |
1682 | | * Returns -1 on error, 1 if x < y, 0 if x >= y. |
1683 | | */ |
1684 | | |
1685 | 18.5M | #define ISLT(X, Y) (*(ms->key_compare))(X, Y, ms) |
1686 | | |
1687 | | /* Compare X to Y via "<". Goto "fail" if the comparison raises an |
1688 | | error. Else "k" is set to true iff X<Y, and an "if (k)" block is |
1689 | | started. It makes more sense in context <wink>. X and Y are PyObject*s. |
1690 | | */ |
1691 | 17.3M | #define IFLT(X, Y) if ((k = ISLT(X, Y)) < 0) goto fail; \ |
1692 | 17.3M | if (k) |
1693 | | |
1694 | | /* The maximum number of entries in a MergeState's pending-runs stack. |
1695 | | * For a list with n elements, this needs at most floor(log2(n)) + 1 entries |
1696 | | * even if we didn't force runs to a minimal length. So the number of bits |
1697 | | * in a Py_ssize_t is plenty large enough for all cases. |
1698 | | */ |
1699 | | #define MAX_MERGE_PENDING (SIZEOF_SIZE_T * 8) |
1700 | | |
1701 | | /* When we get into galloping mode, we stay there until both runs win less |
1702 | | * often than MIN_GALLOP consecutive times. See listsort.txt for more info. |
1703 | | */ |
1704 | 1.14M | #define MIN_GALLOP 7 |
1705 | | |
1706 | | /* Avoid malloc for small temp arrays. */ |
1707 | 2.13M | #define MERGESTATE_TEMP_SIZE 256 |
1708 | | |
1709 | | /* The largest value of minrun. This must be a power of 2, and >= 1 */ |
1710 | 905k | #define MAX_MINRUN 64 |
1711 | | #if ((MAX_MINRUN) < 1) || ((MAX_MINRUN) & ((MAX_MINRUN) - 1)) |
1712 | | #error "MAX_MINRUN must be a power of 2, and >= 1" |
1713 | | #endif |
1714 | | |
1715 | | /* One MergeState exists on the stack per invocation of mergesort. It's just |
1716 | | * a convenient way to pass state around among the helper functions. |
1717 | | */ |
1718 | | struct s_slice { |
1719 | | sortslice base; |
1720 | | Py_ssize_t len; /* length of run */ |
1721 | | int power; /* node "level" for powersort merge strategy */ |
1722 | | }; |
1723 | | |
1724 | | typedef struct s_MergeState MergeState; |
1725 | | struct s_MergeState { |
1726 | | /* This controls when we get *into* galloping mode. It's initialized |
1727 | | * to MIN_GALLOP. merge_lo and merge_hi tend to nudge it higher for |
1728 | | * random data, and lower for highly structured data. |
1729 | | */ |
1730 | | Py_ssize_t min_gallop; |
1731 | | |
1732 | | Py_ssize_t listlen; /* len(input_list) - read only */ |
1733 | | PyObject **basekeys; /* base address of keys array - read only */ |
1734 | | |
1735 | | /* 'a' is temp storage to help with merges. It contains room for |
1736 | | * alloced entries. |
1737 | | */ |
1738 | | sortslice a; /* may point to temparray below */ |
1739 | | Py_ssize_t alloced; |
1740 | | |
1741 | | /* A stack of n pending runs yet to be merged. Run #i starts at |
1742 | | * address base[i] and extends for len[i] elements. It's always |
1743 | | * true (so long as the indices are in bounds) that |
1744 | | * |
1745 | | * pending[i].base + pending[i].len == pending[i+1].base |
1746 | | * |
1747 | | * so we could cut the storage for this, but it's a minor amount, |
1748 | | * and keeping all the info explicit simplifies the code. |
1749 | | */ |
1750 | | int n; |
1751 | | struct s_slice pending[MAX_MERGE_PENDING]; |
1752 | | |
1753 | | /* 'a' points to this when possible, rather than muck with malloc. */ |
1754 | | PyObject *temparray[MERGESTATE_TEMP_SIZE]; |
1755 | | |
1756 | | /* This is the function we will use to compare two keys, |
1757 | | * even when none of our special cases apply and we have to use |
1758 | | * safe_object_compare. */ |
1759 | | int (*key_compare)(PyObject *, PyObject *, MergeState *); |
1760 | | |
1761 | | /* This function is used by unsafe_object_compare to optimize comparisons |
1762 | | * when we know our list is type-homogeneous but we can't assume anything else. |
1763 | | * In the pre-sort check it is set equal to Py_TYPE(key)->tp_richcompare */ |
1764 | | PyObject *(*key_richcompare)(PyObject *, PyObject *, int); |
1765 | | |
1766 | | /* This function is used by unsafe_tuple_compare to compare the first elements |
1767 | | * of tuples. It may be set to safe_object_compare, but the idea is that hopefully |
1768 | | * we can assume more, and use one of the special-case compares. */ |
1769 | | int (*tuple_elem_compare)(PyObject *, PyObject *, MergeState *); |
1770 | | |
1771 | |   /* Variables used for minrun computation. The "ideal" minrun length is
1772 | | * the infinite precision listlen / 2**e. See listsort.txt. |
1773 | | */ |
1774 | | Py_ssize_t mr_current, mr_e, mr_mask; |
1775 | | }; |
1776 | | |
1777 | | /* binarysort is the best method for sorting small arrays: it does few |
1778 | | compares, but can do data movement quadratic in the number of elements. |
1779 | |    ss->keys is viewed as an array of n keys, a[:n]. a[:ok] is already sorted.
1780 | | Pass ok = 0 (or 1) if you don't know. |
1781 | | It's sorted in-place, by a stable binary insertion sort. If ss->values |
1782 | |    isn't NULL, it's permuted in lockstep with ss->keys.
1783 | | On entry, must have n >= 1, and 0 <= ok <= n <= MAX_MINRUN. |
1784 | | Return -1 if comparison raises an exception, else 0. |
1785 | | Even in case of error, the output slice will be some permutation of |
1786 | | the input (nothing is lost or duplicated). |
1787 | | */ |
1788 | | static int |
1789 | | binarysort(MergeState *ms, const sortslice *ss, Py_ssize_t n, Py_ssize_t ok) |
1790 | 141k | { |
1791 | 141k | Py_ssize_t k; /* for IFLT macro expansion */ |
1792 | 141k | PyObject ** const a = ss->keys; |
1793 | 141k | PyObject ** const v = ss->values; |
1794 | 141k | const bool has_values = v != NULL; |
1795 | 141k | PyObject *pivot; |
1796 | 141k | Py_ssize_t M; |
1797 | | |
1798 | 141k | assert(0 <= ok && ok <= n && 1 <= n && n <= MAX_MINRUN); |
1799 | | /* assert a[:ok] is sorted */ |
1800 | 141k | if (! ok) |
1801 | 0 | ++ok; |
1802 | | /* Regular insertion sort has average- and worst-case O(n**2) cost |
1803 | | for both # of comparisons and number of bytes moved. But its branches |
1804 | | are highly predictable, and it loves sorted input (n-1 compares and no |
1805 | | data movement). This is significant in cases like sortperf.py's %sort, |
1806 | | where an out-of-order element near the start of a run is moved into |
1807 | | place slowly but then the remaining elements up to length minrun are |
1808 | | generally at worst one slot away from their correct position (so only |
1809 | |      need 1 or 2 compares to resolve). If comparisons are very fast (such
1810 | | as for a list of Python floats), the simple inner loop leaves it |
1811 | | very competitive with binary insertion, despite that it does |
1812 | | significantly more compares overall on random data. |
1813 | | |
1814 | | Binary insertion sort has worst, average, and best case O(n log n) |
1815 | | cost for # of comparisons, but worst and average case O(n**2) cost |
1816 | | for data movement. The more expensive comparisons, the more important |
1817 | | the comparison advantage. But its branches are less predictable the |
1818 | | more "randomish" the data, and that's so significant its worst case |
1819 | | in real life is random input rather than reverse-ordered (which does |
1820 | |      about twice the data movement as random input does).
1821 | | |
1822 | | Note that the number of bytes moved doesn't seem to matter. MAX_MINRUN |
1823 | | of 64 is so small that the key and value pointers all fit in a corner |
1824 | | of L1 cache, and moving things around in that is very fast. */ |
1825 | | #if 0 // ordinary insertion sort. |
1826 | | PyObject * vpivot = NULL; |
1827 | | for (; ok < n; ++ok) { |
1828 | | pivot = a[ok]; |
1829 | | if (has_values) |
1830 | | vpivot = v[ok]; |
1831 | | for (M = ok - 1; M >= 0; --M) { |
1832 | | k = ISLT(pivot, a[M]); |
1833 | | if (k < 0) { |
1834 | | a[M + 1] = pivot; |
1835 | | if (has_values) |
1836 | | v[M + 1] = vpivot; |
1837 | | goto fail; |
1838 | | } |
1839 | | else if (k) { |
1840 | | a[M + 1] = a[M]; |
1841 | | if (has_values) |
1842 | | v[M + 1] = v[M]; |
1843 | | } |
1844 | | else |
1845 | | break; |
1846 | | } |
1847 | | a[M + 1] = pivot; |
1848 | | if (has_values) |
1849 | | v[M + 1] = vpivot; |
1850 | | } |
1851 | | #else // binary insertion sort |
1852 | 141k | Py_ssize_t L, R; |
1853 | 2.81M | for (; ok < n; ++ok) { |
1854 | | /* set L to where a[ok] belongs */ |
1855 | 2.67M | L = 0; |
1856 | 2.67M | R = ok; |
1857 | 2.67M | pivot = a[ok]; |
1858 | | /* Slice invariants. vacuously true at the start: |
1859 | | * all a[0:L] <= pivot |
1860 | | * all a[L:R] unknown |
1861 | | * all a[R:ok] > pivot |
1862 | | */ |
1863 | 2.67M | assert(L < R); |
1864 | 11.4M | do { |
1865 | | /* don't do silly ;-) things to prevent overflow when finding |
1866 | | the midpoint; L and R are very far from filling a Py_ssize_t */ |
1867 | 11.4M | M = (L + R) >> 1; |
1868 | 11.4M | #if 1 // straightforward, but highly unpredictable branch on random data |
1869 | 11.4M | IFLT(pivot, a[M]) |
1870 | 5.23M | R = M; |
1871 | 6.25M | else |
1872 | 6.25M | L = M + 1; |
1873 | | #else |
1874 | | /* Try to get compiler to generate conditional move instructions |
1875 | | instead. Works fine, but leaving it disabled for now because |
1876 | | it's not yielding consistently faster sorts. Needs more |
1877 | | investigation. More computation in the inner loop adds its own |
1878 | | costs, which can be significant when compares are fast. */ |
1879 | | k = ISLT(pivot, a[M]); |
1880 | | if (k < 0) |
1881 | | goto fail; |
1882 | | Py_ssize_t Mp1 = M + 1; |
1883 | | R = k ? M : R; |
1884 | | L = k ? L : Mp1; |
1885 | | #endif |
1886 | 11.4M | } while (L < R); |
1887 | 2.67M | assert(L == R); |
1888 | | /* a[:L] holds all elements from a[:ok] <= pivot now, so pivot belongs |
1889 | | at index L. Slide a[L:ok] to the right a slot to make room for it. |
1890 | | Caution: using memmove is much slower under MSVC 5; we're not |
1891 | | usually moving many slots. Years later: under Visual Studio 2022, |
1892 | | memmove seems just slightly slower than doing it "by hand". */ |
1893 | 24.8M | for (M = ok; M > L; --M) |
1894 | 22.1M | a[M] = a[M - 1]; |
1895 | 2.67M | a[L] = pivot; |
1896 | 2.67M | if (has_values) { |
1897 | 1.49M | pivot = v[ok]; |
1898 | 8.12M | for (M = ok; M > L; --M) |
1899 | 6.62M | v[M] = v[M - 1]; |
1900 | 1.49M | v[L] = pivot; |
1901 | 1.49M | } |
1902 | 2.67M | } |
1903 | 141k | #endif // pick binary or regular insertion sort |
1904 | 141k | return 0; |
1905 | | |
1906 | 0 | fail: |
1907 | 0 | return -1; |
1908 | 141k | } |
1909 | | |
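/* Standalone sketch (not part of listobject.c) of the binary insertion sort
 * used above, on a plain int array with ordinary "<": no Python objects, no
 * key/value pairs, no error paths.  a[:ok] is assumed already sorted, as in
 * binarysort(); the demo_* name is illustrative only. */
#include <stddef.h>

void
demo_binary_insertion_sort(int *a, size_t n, size_t ok)
{
    if (ok == 0) {
        ok = 1;                              /* a single element is sorted */
    }
    for (; ok < n; ++ok) {
        int pivot = a[ok];
        size_t L = 0, R = ok;
        while (L < R) {                      /* invariant: a[:L] <= pivot, a[R:ok] > pivot */
            size_t M = L + (R - L) / 2;
            if (pivot < a[M]) {
                R = M;
            }
            else {
                L = M + 1;                   /* ties go left, keeping the sort stable */
            }
        }
        for (size_t M = ok; M > L; --M) {    /* slide a[L:ok] right one slot */
            a[M] = a[M - 1];
        }
        a[L] = pivot;
    }
}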
1910 | | static void |
1911 | | sortslice_reverse(sortslice *s, Py_ssize_t n) |
1912 | 86.2k | { |
1913 | 86.2k | reverse_slice(s->keys, &s->keys[n]); |
1914 | 86.2k | if (s->values != NULL) |
1915 | 49.0k | reverse_slice(s->values, &s->values[n]); |
1916 | 86.2k | } |
1917 | | |
1918 | | /* |
1919 | | Return the length of the run beginning at slo->keys, spanning no more than |
1920 | | nremaining elements. The run beginning there may be ascending or descending, |
1921 | | but the function permutes it in place, if needed, so that it's always ascending |
1922 | | upon return. |
1923 | | |
1924 | | Returns -1 in case of error. |
1925 | | */ |
1926 | | static Py_ssize_t |
1927 | | count_run(MergeState *ms, sortslice *slo, Py_ssize_t nremaining) |
1928 | 296k | { |
1929 | 296k | Py_ssize_t k; /* used by IFLT macro expansion */ |
1930 | 296k | Py_ssize_t n; |
1931 | 296k | PyObject ** const lo = slo->keys; |
1932 | | |
1933 | | /* In general, as things go on we've established that the slice starts |
1934 | | with a monotone run of n elements, starting at lo. */ |
1935 | | |
1936 | | /* We're n elements into the slice, and the most recent neq+1 elements are |
1937 | | * all equal. This reverses them in-place, and resets neq for reuse. |
1938 | | */ |
1939 | 296k | #define REVERSE_LAST_NEQ \ |
1940 | 296k | if (neq) { \ |
1941 | 9.52k | sortslice slice = *slo; \ |
1942 | 9.52k | ++neq; \ |
1943 | 9.52k | sortslice_advance(&slice, n - neq); \ |
1944 | 9.52k | sortslice_reverse(&slice, neq); \ |
1945 | 9.52k | neq = 0; \ |
1946 | 9.52k | } |
1947 | | |
1948 | | /* Sticking to only __lt__ compares is confusing and error-prone. But in |
1949 | | * this routine, almost all uses of IFLT can be captured by tiny macros |
1950 | | * giving mnemonic names to the intent. Note that inline functions don't |
1951 | | * work for this (IFLT expands to code including `goto fail`). |
1952 | | */ |
1953 | 296k | #define IF_NEXT_LARGER IFLT(lo[n-1], lo[n]) |
1954 | 3.39M | #define IF_NEXT_SMALLER IFLT(lo[n], lo[n-1]) |
1955 | | |
1956 | 296k | assert(nremaining); |
1957 | | /* try ascending run first */ |
1958 | 3.12M | for (n = 1; n < nremaining; ++n) { |
1959 | 3.01M | IF_NEXT_SMALLER |
1960 | 192k | break; |
1961 | 3.01M | } |
1962 | 296k | if (n == nremaining) |
1963 | 104k | return n; |
1964 | | /* lo[n] is strictly less */ |
1965 | | /* If n is 1 now, then the first compare established it's a descending |
1966 | | * run, so fall through to the descending case. But if n > 1, there are |
1967 | | * n elements in an ascending run terminated by the strictly less lo[n]. |
1968 | | * If the first key < lo[n-1], *somewhere* along the way the sequence |
1969 | | * increased, so we're done (there is no descending run). |
1970 | | * Else first key >= lo[n-1], which implies that the entire ascending run |
1971 | | * consists of equal elements. In that case, this is a descending run, |
1972 | | * and we reverse the all-equal prefix in-place. |
1973 | | */ |
1974 | 192k | if (n > 1) { |
1975 | 125k | IFLT(lo[0], lo[n-1]) |
1976 | 120k | return n; |
1977 | 4.71k | sortslice_reverse(slo, n); |
1978 | 4.71k | } |
1979 | 72.0k | ++n; /* in all cases it's been established that lo[n] has been resolved */ |
1980 | | |
1981 | |     /* Finish descending run. All-equal subruns are reversed in-place on the
1982 | | * fly. Their original order will be restored at the end by the whole-slice |
1983 | | * reversal. |
1984 | | */ |
1985 | 72.0k | Py_ssize_t neq = 0; |
1986 | 121k | for ( ; n < nremaining; ++n) { |
1987 | 87.7k | IF_NEXT_SMALLER { |
1988 | | /* This ends the most recent run of equal elements, but still in |
1989 | | * the "descending" direction. |
1990 | | */ |
1991 | 14.9k | REVERSE_LAST_NEQ |
1992 | 14.9k | } |
1993 | 72.7k | else { |
1994 | 72.7k | IF_NEXT_LARGER /* descending run is over */ |
1995 | 38.3k | break; |
1996 | 34.4k | else /* not x < y and not y < x implies x == y */ |
1997 | 34.4k | ++neq; |
1998 | 72.7k | } |
1999 | 87.7k | } |
2000 | 72.0k | REVERSE_LAST_NEQ |
2001 | 72.0k | sortslice_reverse(slo, n); /* transform to ascending run */ |
2002 | | |
2003 | | /* And after reversing, it's possible this can be extended by a |
2004 | | * naturally increasing suffix; e.g., [3, 2, 3, 4, 1] makes an |
2005 | | * ascending run from the first 4 elements. |
2006 | | */ |
2007 | 325k | for ( ; n < nremaining; ++n) { |
2008 | 290k | IF_NEXT_SMALLER |
2009 | 36.5k | break; |
2010 | 290k | } |
2011 | | |
2012 | 72.0k | return n; |
2013 | 0 | fail: |
2014 | 0 | return -1; |
2015 | | |
2016 | 72.0k | #undef REVERSE_LAST_NEQ |
2017 | 72.0k | #undef IF_NEXT_SMALLER |
2018 | 72.0k | #undef IF_NEXT_LARGER |
2019 | 72.0k | } |
2020 | | |
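/* Standalone sketch (not part of listobject.c) of classic run detection on a
 * plain int array: measure a non-decreasing prefix, or a strictly decreasing
 * one and reverse it in place so the run is always ascending on return.  The
 * real count_run() above is more elaborate -- it also reverses all-equal
 * subruns on the fly and tries to extend the run again after the reversal.
 * The demo_* names are illustrative only. */
#include <stddef.h>

static void
demo_reverse(int *lo, int *hi)               /* reverse a half-open slice [lo, hi) */
{
    for (--hi; lo < hi; ++lo, --hi) {
        int t = *lo;
        *lo = *hi;
        *hi = t;
    }
}

size_t
demo_count_run(int *a, size_t n)
{
    if (n < 2) {
        return n;
    }
    size_t k = 2;
    if (a[1] < a[0]) {                       /* strictly descending prefix */
        while (k < n && a[k] < a[k - 1]) {
            ++k;
        }
        demo_reverse(a, a + k);              /* make it ascending */
    }
    else {                                   /* non-decreasing prefix */
        while (k < n && a[k] >= a[k - 1]) {
            ++k;
        }
    }
    return k;                                /* e.g. {3, 2, 1, 5} -> 3, array now {1, 2, 3, 5} */
}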
2021 | | /* |
2022 | | Locate the proper position of key in a sorted vector; if the vector contains |
2023 | | an element equal to key, return the position immediately to the left of |
2024 | | the leftmost equal element. [gallop_right() does the same except returns |
2025 | | the position to the right of the rightmost equal element (if any).] |
2026 | | |
2027 | | "a" is a sorted vector with n elements, starting at a[0]. n must be > 0. |
2028 | | |
2029 | | "hint" is an index at which to begin the search, 0 <= hint < n. The closer |
2030 | | hint is to the final result, the faster this runs. |
2031 | | |
2032 | | The return value is the int k in 0..n such that |
2033 | | |
2034 | | a[k-1] < key <= a[k] |
2035 | | |
2036 | | pretending that *(a-1) is minus infinity and a[n] is plus infinity. IOW, |
2037 | | key belongs at index k; or, IOW, the first k elements of a should precede |
2038 | | key, and the last n-k should follow key. |
2039 | | |
2040 | | Returns -1 on error. See listsort.txt for info on the method. |
2041 | | */ |
2042 | | static Py_ssize_t |
2043 | | gallop_left(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint) |
2044 | 208k | { |
2045 | 208k | Py_ssize_t ofs; |
2046 | 208k | Py_ssize_t lastofs; |
2047 | 208k | Py_ssize_t k; |
2048 | | |
2049 | 208k | assert(key && a && n > 0 && hint >= 0 && hint < n); |
2050 | | |
2051 | 208k | a += hint; |
2052 | 208k | lastofs = 0; |
2053 | 208k | ofs = 1; |
2054 | 208k | IFLT(*a, key) { |
2055 | | /* a[hint] < key -- gallop right, until |
2056 | | * a[hint + lastofs] < key <= a[hint + ofs] |
2057 | | */ |
2058 | 117k | const Py_ssize_t maxofs = n - hint; /* &a[n-1] is highest */ |
2059 | 359k | while (ofs < maxofs) { |
2060 | 279k | IFLT(a[ofs], key) { |
2061 | 241k | lastofs = ofs; |
2062 | 241k | assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2); |
2063 | 241k | ofs = (ofs << 1) + 1; |
2064 | 241k | } |
2065 | 37.9k | else /* key <= a[hint + ofs] */ |
2066 | 37.9k | break; |
2067 | 279k | } |
2068 | 117k | if (ofs > maxofs) |
2069 | 30.9k | ofs = maxofs; |
2070 | | /* Translate back to offsets relative to &a[0]. */ |
2071 | 117k | lastofs += hint; |
2072 | 117k | ofs += hint; |
2073 | 117k | } |
2074 | 90.1k | else { |
2075 | | /* key <= a[hint] -- gallop left, until |
2076 | | * a[hint - ofs] < key <= a[hint - lastofs] |
2077 | | */ |
2078 | 90.1k | const Py_ssize_t maxofs = hint + 1; /* &a[0] is lowest */ |
2079 | 294k | while (ofs < maxofs) { |
2080 | 263k | IFLT(*(a-ofs), key) |
2081 | 59.6k | break; |
2082 | | /* key <= a[hint - ofs] */ |
2083 | 204k | lastofs = ofs; |
2084 | 204k | assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2); |
2085 | 204k | ofs = (ofs << 1) + 1; |
2086 | 204k | } |
2087 | 90.1k | if (ofs > maxofs) |
2088 | 18.1k | ofs = maxofs; |
2089 | | /* Translate back to positive offsets relative to &a[0]. */ |
2090 | 90.1k | k = lastofs; |
2091 | 90.1k | lastofs = hint - ofs; |
2092 | 90.1k | ofs = hint - k; |
2093 | 90.1k | } |
2094 | 208k | a -= hint; |
2095 | | |
2096 | 208k | assert(-1 <= lastofs && lastofs < ofs && ofs <= n); |
2097 | | /* Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the |
2098 | | * right of lastofs but no farther right than ofs. Do a binary |
2099 | | * search, with invariant a[lastofs-1] < key <= a[ofs]. |
2100 | | */ |
2101 | 208k | ++lastofs; |
2102 | 593k | while (lastofs < ofs) { |
2103 | 385k | Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1); |
2104 | | |
2105 | 385k | IFLT(a[m], key) |
2106 | 200k | lastofs = m+1; /* a[m] < key */ |
2107 | 185k | else |
2108 | 185k | ofs = m; /* key <= a[m] */ |
2109 | 385k | } |
2110 | 208k | assert(lastofs == ofs); /* so a[ofs-1] < key <= a[ofs] */ |
2111 | 208k | return ofs; |
2112 | | |
2113 | 0 | fail: |
2114 | 0 | return -1; |
2115 | 208k | } |
2116 | | |
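/* Standalone sketch (not part of listobject.c) of the galloping search on a
 * plain sorted int array, with the hint fixed at 0 for brevity: probe offsets
 * 1, 3, 7, 15, ... until the key is bracketed, then binary-search the last
 * gap.  Returns k such that a[k-1] < key <= a[k], i.e. gallop_left()'s
 * contract; gallop_right() below differs only in how ties are broken.  The
 * demo_* name is illustrative only. */
#include <stddef.h>

size_t
demo_gallop_left(int key, const int *a, size_t n)
{
    if (n == 0 || key <= a[0]) {
        return 0;                            /* key belongs at the very front */
    }
    size_t lastofs = 0;                      /* invariant: a[lastofs] < key */
    size_t ofs = 1;
    while (ofs < n && a[ofs] < key) {        /* gallop: 1, 3, 7, 15, ... */
        lastofs = ofs;
        ofs = (ofs << 1) + 1;
    }
    if (ofs > n) {
        ofs = n;                             /* treat a[n] as +infinity */
    }
    /* now a[lastofs] < key <= a[ofs]; finish with a plain binary search */
    ++lastofs;
    while (lastofs < ofs) {
        size_t m = lastofs + (ofs - lastofs) / 2;
        if (a[m] < key) {
            lastofs = m + 1;
        }
        else {
            ofs = m;
        }
    }
    return ofs;                              /* a[ofs-1] < key <= a[ofs] */
}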
2117 | | /* |
2118 | | Exactly like gallop_left(), except that if key already exists in a[0:n], |
2119 | | finds the position immediately to the right of the rightmost equal value. |
2120 | | |
2121 | | The return value is the int k in 0..n such that |
2122 | | |
2123 | | a[k-1] <= key < a[k] |
2124 | | |
2125 | | or -1 if error. |
2126 | | |
2127 | | The code duplication is massive, but this is enough different given that |
2128 | | we're sticking to "<" comparisons that it's much harder to follow if |
2129 | | written as one routine with yet another "left or right?" flag. |
2130 | | */ |
2131 | | static Py_ssize_t |
2132 | | gallop_right(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint) |
2133 | 228k | { |
2134 | 228k | Py_ssize_t ofs; |
2135 | 228k | Py_ssize_t lastofs; |
2136 | 228k | Py_ssize_t k; |
2137 | | |
2138 | 228k | assert(key && a && n > 0 && hint >= 0 && hint < n); |
2139 | | |
2140 | 228k | a += hint; |
2141 | 228k | lastofs = 0; |
2142 | 228k | ofs = 1; |
2143 | 228k | IFLT(key, *a) { |
2144 | | /* key < a[hint] -- gallop left, until |
2145 | | * a[hint - ofs] <= key < a[hint - lastofs] |
2146 | | */ |
2147 | 99.3k | const Py_ssize_t maxofs = hint + 1; /* &a[0] is lowest */ |
2148 | 229k | while (ofs < maxofs) { |
2149 | 152k | IFLT(key, *(a-ofs)) { |
2150 | 129k | lastofs = ofs; |
2151 | 129k | assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2); |
2152 | 129k | ofs = (ofs << 1) + 1; |
2153 | 129k | } |
2154 | 22.8k | else /* a[hint - ofs] <= key */ |
2155 | 22.8k | break; |
2156 | 152k | } |
2157 | 99.3k | if (ofs > maxofs) |
2158 | 13.1k | ofs = maxofs; |
2159 | | /* Translate back to positive offsets relative to &a[0]. */ |
2160 | 99.3k | k = lastofs; |
2161 | 99.3k | lastofs = hint - ofs; |
2162 | 99.3k | ofs = hint - k; |
2163 | 99.3k | } |
2164 | 129k | else { |
2165 | | /* a[hint] <= key -- gallop right, until |
2166 | | * a[hint + lastofs] <= key < a[hint + ofs] |
2167 | | */ |
2168 | 129k | const Py_ssize_t maxofs = n - hint; /* &a[n-1] is highest */ |
2169 | 416k | while (ofs < maxofs) { |
2170 | 366k | IFLT(key, a[ofs]) |
2171 | 78.7k | break; |
2172 | | /* a[hint + ofs] <= key */ |
2173 | 287k | lastofs = ofs; |
2174 | 287k | assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2); |
2175 | 287k | ofs = (ofs << 1) + 1; |
2176 | 287k | } |
2177 | 129k | if (ofs > maxofs) |
2178 | 27.0k | ofs = maxofs; |
2179 | | /* Translate back to offsets relative to &a[0]. */ |
2180 | 129k | lastofs += hint; |
2181 | 129k | ofs += hint; |
2182 | 129k | } |
2183 | 228k | a -= hint; |
2184 | | |
2185 | 228k | assert(-1 <= lastofs && lastofs < ofs && ofs <= n); |
2186 | | /* Now a[lastofs] <= key < a[ofs], so key belongs somewhere to the |
2187 | | * right of lastofs but no farther right than ofs. Do a binary |
2188 | | * search, with invariant a[lastofs-1] <= key < a[ofs]. |
2189 | | */ |
2190 | 228k | ++lastofs; |
2191 | 594k | while (lastofs < ofs) { |
2192 | 365k | Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1); |
2193 | | |
2194 | 365k | IFLT(key, a[m]) |
2195 | 189k | ofs = m; /* key < a[m] */ |
2196 | 176k | else |
2197 | 176k | lastofs = m+1; /* a[m] <= key */ |
2198 | 365k | } |
2199 | 228k | assert(lastofs == ofs); /* so a[ofs-1] <= key < a[ofs] */ |
2200 | 228k | return ofs; |
2201 | | |
2202 | 0 | fail: |
2203 | 0 | return -1; |
2204 | 228k | } |
2205 | | |
2206 | | /* Conceptually a MergeState's constructor. */ |
2207 | | static void |
2208 | | merge_init(MergeState *ms, Py_ssize_t list_size, int has_keyfunc, |
2209 | | sortslice *lo) |
2210 | 871k | { |
2211 | 871k | assert(ms != NULL); |
2212 | 871k | if (has_keyfunc) { |
2213 | | /* The temporary space for merging will need at most half the list |
2214 | | * size rounded up. Use the minimum possible space so we can use the |
2215 | | * rest of temparray for other things. In particular, if there is |
2216 | | * enough extra space, listsort() will use it to store the keys. |
2217 | | */ |
2218 | 631k | ms->alloced = (list_size + 1) / 2; |
2219 | | |
2220 | | /* ms->alloced describes how many keys will be stored at |
2221 | | ms->temparray, but we also need to store the values. Hence, |
2222 | | ms->alloced is capped at half of MERGESTATE_TEMP_SIZE. */ |
2223 | 631k | if (MERGESTATE_TEMP_SIZE / 2 < ms->alloced) |
2224 | 2.73k | ms->alloced = MERGESTATE_TEMP_SIZE / 2; |
2225 | 631k | ms->a.values = &ms->temparray[ms->alloced]; |
2226 | 631k | } |
2227 | 239k | else { |
2228 | 239k | ms->alloced = MERGESTATE_TEMP_SIZE; |
2229 | 239k | ms->a.values = NULL; |
2230 | 239k | } |
2231 | 871k | ms->a.keys = ms->temparray; |
2232 | 871k | ms->n = 0; |
2233 | 871k | ms->min_gallop = MIN_GALLOP; |
2234 | 871k | ms->listlen = list_size; |
2235 | 871k | ms->basekeys = lo->keys; |
2236 | | |
2237 | | /* State for generating minrun values. See listsort.txt. */ |
2238 | 871k | ms->mr_e = 0; |
2239 | 905k | while (list_size >> ms->mr_e >= MAX_MINRUN) { |
2240 | 34.8k | ++ms->mr_e; |
2241 | 34.8k | } |
2242 | 871k | ms->mr_mask = (1 << ms->mr_e) - 1; |
2243 | 871k | ms->mr_current = 0; |
2244 | 871k | } |
2245 | | |
2246 | | /* Free all the temp memory owned by the MergeState. This must be called |
2247 | | * when you're done with a MergeState, and may be called before then if |
2248 | | * you want to free the temp memory early. |
2249 | | */ |
2250 | | static void |
2251 | | merge_freemem(MergeState *ms) |
2252 | 876k | { |
2253 | 876k | assert(ms != NULL); |
2254 | 876k | if (ms->a.keys != ms->temparray) { |
2255 | 5.23k | PyMem_Free(ms->a.keys); |
2256 | 5.23k | ms->a.keys = NULL; |
2257 | 5.23k | } |
2258 | 876k | } |
2259 | | |
2260 | | /* Ensure enough temp memory for 'need' array slots is available. |
2261 | | * Returns 0 on success and -1 if the memory can't be gotten. |
2262 | | */ |
2263 | | static int |
2264 | | merge_getmem(MergeState *ms, Py_ssize_t need) |
2265 | 5.23k | { |
2266 | 5.23k | int multiplier; |
2267 | | |
2268 | 5.23k | assert(ms != NULL); |
2269 | 5.23k | if (need <= ms->alloced) |
2270 | 0 | return 0; |
2271 | | |
2272 | 5.23k | multiplier = ms->a.values != NULL ? 2 : 1; |
2273 | | |
2274 | | /* Don't realloc! That can cost cycles to copy the old data, but |
2275 | | * we don't care what's in the block. |
2276 | | */ |
2277 | 5.23k | merge_freemem(ms); |
2278 | 5.23k | if ((size_t)need > PY_SSIZE_T_MAX / sizeof(PyObject *) / multiplier) { |
2279 | 0 | PyErr_NoMemory(); |
2280 | 0 | return -1; |
2281 | 0 | } |
2282 | 5.23k | ms->a.keys = (PyObject **)PyMem_Malloc(multiplier * need |
2283 | 5.23k | * sizeof(PyObject *)); |
2284 | 5.23k | if (ms->a.keys != NULL) { |
2285 | 5.23k | ms->alloced = need; |
2286 | 5.23k | if (ms->a.values != NULL) |
2287 | 4.69k | ms->a.values = &ms->a.keys[need]; |
2288 | 5.23k | return 0; |
2289 | 5.23k | } |
2290 | 0 | PyErr_NoMemory(); |
2291 | 0 | return -1; |
2292 | 5.23k | } |
2293 | 75.0k | #define MERGE_GETMEM(MS, NEED) ((NEED) <= (MS)->alloced ? 0 : \ |
2294 | 75.0k | merge_getmem(MS, NEED)) |
2295 | | |
2296 | | /* Merge the na elements starting at ssa with the nb elements starting at |
2297 | | * ssb.keys = ssa.keys + na in a stable way, in-place. na and nb must be > 0. |
2298 | | * Must also have that ssa.keys[na-1] belongs at the end of the merge, and |
2299 | | * should have na <= nb. See listsort.txt for more info. Return 0 if |
2300 | | * successful, -1 if error. |
2301 | | */ |
2302 | | static Py_ssize_t |
2303 | | merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, |
2304 | | sortslice ssb, Py_ssize_t nb) |
2305 | 50.3k | { |
2306 | 50.3k | Py_ssize_t k; |
2307 | 50.3k | sortslice dest; |
2308 | 50.3k | int result = -1; /* guilty until proved innocent */ |
2309 | 50.3k | Py_ssize_t min_gallop; |
2310 | | |
2311 | 50.3k | assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0); |
2312 | 50.3k | assert(ssa.keys + na == ssb.keys); |
2313 | 50.3k | if (MERGE_GETMEM(ms, na) < 0) |
2314 | 0 | return -1; |
2315 | 50.3k | sortslice_memcpy(&ms->a, 0, &ssa, 0, na); |
2316 | 50.3k | dest = ssa; |
2317 | 50.3k | ssa = ms->a; |
2318 | | |
2319 | 50.3k | sortslice_copy_incr(&dest, &ssb); |
2320 | 50.3k | --nb; |
2321 | 50.3k | if (nb == 0) |
2322 | 2.51k | goto Succeed; |
2323 | 47.8k | if (na == 1) |
2324 | 6.46k | goto CopyB; |
2325 | | |
2326 | 41.3k | min_gallop = ms->min_gallop; |
2327 | 71.6k | for (;;) { |
2328 | 71.6k | Py_ssize_t acount = 0; /* # of times A won in a row */ |
2329 | 71.6k | Py_ssize_t bcount = 0; /* # of times B won in a row */ |
2330 | | |
2331 | | /* Do the straightforward thing until (if ever) one run |
2332 | | * appears to win consistently. |
2333 | | */ |
2334 | 901k | for (;;) { |
2335 | 901k | assert(na > 1 && nb > 0); |
2336 | 901k | k = ISLT(ssb.keys[0], ssa.keys[0]); |
2337 | 901k | if (k) { |
2338 | 489k | if (k < 0) |
2339 | 0 | goto Fail; |
2340 | 489k | sortslice_copy_incr(&dest, &ssb); |
2341 | 489k | ++bcount; |
2342 | 489k | acount = 0; |
2343 | 489k | --nb; |
2344 | 489k | if (nb == 0) |
2345 | 4.36k | goto Succeed; |
2346 | 485k | if (bcount >= min_gallop) |
2347 | 33.5k | break; |
2348 | 485k | } |
2349 | 411k | else { |
2350 | 411k | sortslice_copy_incr(&dest, &ssa); |
2351 | 411k | ++acount; |
2352 | 411k | bcount = 0; |
2353 | 411k | --na; |
2354 | 411k | if (na == 1) |
2355 | 8.63k | goto CopyB; |
2356 | 402k | if (acount >= min_gallop) |
2357 | 25.0k | break; |
2358 | 402k | } |
2359 | 901k | } |
2360 | | |
2361 | | /* One run is winning so consistently that galloping may |
2362 | | * be a huge win. So try that, and continue galloping until |
2363 | | * (if ever) neither run appears to be winning consistently |
2364 | | * anymore. |
2365 | | */ |
2366 | 58.6k | ++min_gallop; |
2367 | 94.4k | do { |
2368 | 94.4k | assert(na > 1 && nb > 0); |
2369 | 94.4k | min_gallop -= min_gallop > 1; |
2370 | 94.4k | ms->min_gallop = min_gallop; |
2371 | 94.4k | k = gallop_right(ms, ssb.keys[0], ssa.keys, na, 0); |
2372 | 94.4k | acount = k; |
2373 | 94.4k | if (k) { |
2374 | 50.9k | if (k < 0) |
2375 | 0 | goto Fail; |
2376 | 50.9k | sortslice_memcpy(&dest, 0, &ssa, 0, k); |
2377 | 50.9k | sortslice_advance(&dest, k); |
2378 | 50.9k | sortslice_advance(&ssa, k); |
2379 | 50.9k | na -= k; |
2380 | 50.9k | if (na == 1) |
2381 | 6.41k | goto CopyB; |
2382 | | /* na==0 is impossible now if the comparison |
2383 | | * function is consistent, but we can't assume |
2384 | | * that it is. |
2385 | | */ |
2386 | 44.5k | if (na == 0) |
2387 | 0 | goto Succeed; |
2388 | 44.5k | } |
2389 | 88.0k | sortslice_copy_incr(&dest, &ssb); |
2390 | 88.0k | --nb; |
2391 | 88.0k | if (nb == 0) |
2392 | 2.05k | goto Succeed; |
2393 | | |
2394 | 86.0k | k = gallop_left(ms, ssa.keys[0], ssb.keys, nb, 0); |
2395 | 86.0k | bcount = k; |
2396 | 86.0k | if (k) { |
2397 | 74.5k | if (k < 0) |
2398 | 0 | goto Fail; |
2399 | 74.5k | sortslice_memmove(&dest, 0, &ssb, 0, k); |
2400 | 74.5k | sortslice_advance(&dest, k); |
2401 | 74.5k | sortslice_advance(&ssb, k); |
2402 | 74.5k | nb -= k; |
2403 | 74.5k | if (nb == 0) |
2404 | 16.6k | goto Succeed; |
2405 | 74.5k | } |
2406 | 69.3k | sortslice_copy_incr(&dest, &ssa); |
2407 | 69.3k | --na; |
2408 | 69.3k | if (na == 1) |
2409 | 3.28k | goto CopyB; |
2410 | 69.3k | } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP); |
2411 | 30.2k | ++min_gallop; /* penalize it for leaving galloping mode */ |
2412 | 30.2k | ms->min_gallop = min_gallop; |
2413 | 30.2k | } |
2414 | 25.5k | Succeed: |
2415 | 25.5k | result = 0; |
2416 | 25.5k | Fail: |
2417 | 25.5k | if (na) |
2418 | 25.5k | sortslice_memcpy(&dest, 0, &ssa, 0, na); |
2419 | 25.5k | return result; |
2420 | 24.7k | CopyB: |
2421 | 24.7k | assert(na == 1 && nb > 0); |
2422 | | /* The last element of ssa belongs at the end of the merge. */ |
2423 | 24.7k | sortslice_memmove(&dest, 0, &ssb, 0, nb); |
2424 | 24.7k | sortslice_copy(&dest, nb, &ssa, 0); |
2425 | 24.7k | return 0; |
2426 | 25.5k | } |
2427 | | |
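/* Standalone sketch (not part of listobject.c) of merge_lo()'s basic shape on
 * a plain int array, without galloping: copy the shorter left run to a temp
 * buffer, then merge forward into the hole.  Ties are taken from the left run,
 * which is exactly what keeps the merge stable.  The demo_* name is
 * illustrative only. */
#include <stdlib.h>
#include <string.h>

int
demo_merge_lo(int *a, size_t na, size_t nb)  /* runs are a[0:na] and a[na:na+nb] */
{
    int *tmp = malloc(na * sizeof(int));
    if (tmp == NULL) {
        return -1;
    }
    memcpy(tmp, a, na * sizeof(int));

    size_t i = 0, j = na, k = 0;             /* i: temp copy, j: right run, k: dest */
    while (i < na && j < na + nb) {
        if (a[j] < tmp[i]) {
            a[k++] = a[j++];                 /* strictly smaller: take from the right */
        }
        else {
            a[k++] = tmp[i++];               /* ties favor the left run (stability) */
        }
    }
    while (i < na) {
        a[k++] = tmp[i++];                   /* drain what's left of the left run */
    }
    /* any remaining tail of the right run is already in place */
    free(tmp);
    return 0;
}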
2428 | | /* Merge the na elements starting at ssa with the nb elements starting at
2429 | | * ssb.keys = ssa.keys + na in a stable way, in-place. na and nb must be > 0. |
2430 | | * Must also have that ssa.keys[na-1] belongs at the end of the merge, and |
2431 | | * should have na >= nb. See listsort.txt for more info. Return 0 if |
2432 | | * successful, -1 if error. |
2433 | | */ |
2434 | | static Py_ssize_t |
2435 | | merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na, |
2436 | | sortslice ssb, Py_ssize_t nb) |
2437 | 24.6k | { |
2438 | 24.6k | Py_ssize_t k; |
2439 | 24.6k | sortslice dest, basea, baseb; |
2440 | 24.6k | int result = -1; /* guilty until proved innocent */ |
2441 | 24.6k | Py_ssize_t min_gallop; |
2442 | | |
2443 | 24.6k | assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0); |
2444 | 24.6k | assert(ssa.keys + na == ssb.keys); |
2445 | 24.6k | if (MERGE_GETMEM(ms, nb) < 0) |
2446 | 0 | return -1; |
2447 | 24.6k | dest = ssb; |
2448 | 24.6k | sortslice_advance(&dest, nb-1); |
2449 | 24.6k | sortslice_memcpy(&ms->a, 0, &ssb, 0, nb); |
2450 | 24.6k | basea = ssa; |
2451 | 24.6k | baseb = ms->a; |
2452 | 24.6k | ssb.keys = ms->a.keys + nb - 1; |
2453 | 24.6k | if (ssb.values != NULL) |
2454 | 21.0k | ssb.values = ms->a.values + nb - 1; |
2455 | 24.6k | sortslice_advance(&ssa, na - 1); |
2456 | | |
2457 | 24.6k | sortslice_copy_decr(&dest, &ssa); |
2458 | 24.6k | --na; |
2459 | 24.6k | if (na == 0) |
2460 | 0 | goto Succeed; |
2461 | 24.6k | if (nb == 1) |
2462 | 1.11k | goto CopyA; |
2463 | | |
2464 | 23.5k | min_gallop = ms->min_gallop; |
2465 | 34.5k | for (;;) { |
2466 | 34.5k | Py_ssize_t acount = 0; /* # of times A won in a row */ |
2467 | 34.5k | Py_ssize_t bcount = 0; /* # of times B won in a row */ |
2468 | | |
2469 | | /* Do the straightforward thing until (if ever) one run |
2470 | | * appears to win consistently. |
2471 | | */ |
2472 | 313k | for (;;) { |
2473 | 313k | assert(na > 0 && nb > 1); |
2474 | 313k | k = ISLT(ssb.keys[0], ssa.keys[0]); |
2475 | 313k | if (k) { |
2476 | 154k | if (k < 0) |
2477 | 0 | goto Fail; |
2478 | 154k | sortslice_copy_decr(&dest, &ssa); |
2479 | 154k | ++acount; |
2480 | 154k | bcount = 0; |
2481 | 154k | --na; |
2482 | 154k | if (na == 0) |
2483 | 805 | goto Succeed; |
2484 | 153k | if (acount >= min_gallop) |
2485 | 16.7k | break; |
2486 | 153k | } |
2487 | 159k | else { |
2488 | 159k | sortslice_copy_decr(&dest, &ssb); |
2489 | 159k | ++bcount; |
2490 | 159k | acount = 0; |
2491 | 159k | --nb; |
2492 | 159k | if (nb == 1) |
2493 | 569 | goto CopyA; |
2494 | 158k | if (bcount >= min_gallop) |
2495 | 16.4k | break; |
2496 | 158k | } |
2497 | 313k | } |
2498 | | |
2499 | | /* One run is winning so consistently that galloping may |
2500 | | * be a huge win. So try that, and continue galloping until |
2501 | | * (if ever) neither run appears to be winning consistently |
2502 | | * anymore. |
2503 | | */ |
2504 | 33.1k | ++min_gallop; |
2505 | 58.7k | do { |
2506 | 58.7k | assert(na > 0 && nb > 1); |
2507 | 58.7k | min_gallop -= min_gallop > 1; |
2508 | 58.7k | ms->min_gallop = min_gallop; |
2509 | 58.7k | k = gallop_right(ms, ssb.keys[0], basea.keys, na, na-1); |
2510 | 58.7k | if (k < 0) |
2511 | 0 | goto Fail; |
2512 | 58.7k | k = na - k; |
2513 | 58.7k | acount = k; |
2514 | 58.7k | if (k) { |
2515 | 37.3k | sortslice_advance(&dest, -k); |
2516 | 37.3k | sortslice_advance(&ssa, -k); |
2517 | 37.3k | sortslice_memmove(&dest, 1, &ssa, 1, k); |
2518 | 37.3k | na -= k; |
2519 | 37.3k | if (na == 0) |
2520 | 10.8k | goto Succeed; |
2521 | 37.3k | } |
2522 | 47.8k | sortslice_copy_decr(&dest, &ssb); |
2523 | 47.8k | --nb; |
2524 | 47.8k | if (nb == 1) |
2525 | 841 | goto CopyA; |
2526 | | |
2527 | 47.0k | k = gallop_left(ms, ssa.keys[0], baseb.keys, nb, nb-1); |
2528 | 47.0k | if (k < 0) |
2529 | 0 | goto Fail; |
2530 | 47.0k | k = nb - k; |
2531 | 47.0k | bcount = k; |
2532 | 47.0k | if (k) { |
2533 | 40.7k | sortslice_advance(&dest, -k); |
2534 | 40.7k | sortslice_advance(&ssb, -k); |
2535 | 40.7k | sortslice_memcpy(&dest, 1, &ssb, 1, k); |
2536 | 40.7k | nb -= k; |
2537 | 40.7k | if (nb == 1) |
2538 | 9.30k | goto CopyA; |
2539 | | /* nb==0 is impossible now if the comparison |
2540 | | * function is consistent, but we can't assume |
2541 | | * that it is. |
2542 | | */ |
2543 | 31.4k | if (nb == 0) |
2544 | 0 | goto Succeed; |
2545 | 31.4k | } |
2546 | 37.7k | sortslice_copy_decr(&dest, &ssa); |
2547 | 37.7k | --na; |
2548 | 37.7k | if (na == 0) |
2549 | 1.22k | goto Succeed; |
2550 | 37.7k | } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP); |
2551 | 11.0k | ++min_gallop; /* penalize it for leaving galloping mode */ |
2552 | 11.0k | ms->min_gallop = min_gallop; |
2553 | 11.0k | } |
2554 | 12.8k | Succeed: |
2555 | 12.8k | result = 0; |
2556 | 12.8k | Fail: |
2557 | 12.8k | if (nb) |
2558 | 12.8k | sortslice_memcpy(&dest, -(nb-1), &baseb, 0, nb); |
2559 | 12.8k | return result; |
2560 | 11.8k | CopyA: |
2561 | 11.8k | assert(nb == 1 && na > 0); |
2562 | | /* The first element of ssb belongs at the front of the merge. */ |
2563 | 11.8k | sortslice_memmove(&dest, 1-na, &ssa, 1-na, na); |
2564 | 11.8k | sortslice_advance(&dest, -na); |
2565 | 11.8k | sortslice_advance(&ssa, -na); |
2566 | 11.8k | sortslice_copy(&dest, 0, &ssb, 0); |
2567 | 11.8k | return 0; |
2568 | 12.8k | } |
2569 | | |
2570 | | /* Merge the two runs at stack indices i and i+1. |
2571 | | * Returns 0 on success, -1 on error. |
2572 | | */ |
2573 | | static Py_ssize_t |
2574 | | merge_at(MergeState *ms, Py_ssize_t i) |
2575 | 75.3k | { |
2576 | 75.3k | sortslice ssa, ssb; |
2577 | 75.3k | Py_ssize_t na, nb; |
2578 | 75.3k | Py_ssize_t k; |
2579 | | |
2580 | 75.3k | assert(ms != NULL); |
2581 | 75.3k | assert(ms->n >= 2); |
2582 | 75.3k | assert(i >= 0); |
2583 | 75.3k | assert(i == ms->n - 2 || i == ms->n - 3); |
2584 | | |
2585 | 75.3k | ssa = ms->pending[i].base; |
2586 | 75.3k | na = ms->pending[i].len; |
2587 | 75.3k | ssb = ms->pending[i+1].base; |
2588 | 75.3k | nb = ms->pending[i+1].len; |
2589 | 75.3k | assert(na > 0 && nb > 0); |
2590 | 75.3k | assert(ssa.keys + na == ssb.keys); |
2591 | | |
2592 | | /* Record the length of the combined runs; if i is the 3rd-last |
2593 | | * run now, also slide over the last run (which isn't involved |
2594 | | * in this merge). The current run i+1 goes away in any case. |
2595 | | */ |
2596 | 75.3k | ms->pending[i].len = na + nb; |
2597 | 75.3k | if (i == ms->n - 3) |
2598 | 551 | ms->pending[i+1] = ms->pending[i+2]; |
2599 | 75.3k | --ms->n; |
2600 | | |
2601 | | /* Where does b start in a? Elements in a before that can be |
2602 | | * ignored (already in place). |
2603 | | */ |
2604 | 75.3k | k = gallop_right(ms, *ssb.keys, ssa.keys, na, 0); |
2605 | 75.3k | if (k < 0) |
2606 | 0 | return -1; |
2607 | 75.3k | sortslice_advance(&ssa, k); |
2608 | 75.3k | na -= k; |
2609 | 75.3k | if (na == 0) |
2610 | 315 | return 0; |
2611 | | |
2612 | | /* Where does a end in b? Elements in b after that can be |
2613 | | * ignored (already in place). |
2614 | | */ |
2615 | 75.0k | nb = gallop_left(ms, ssa.keys[na-1], ssb.keys, nb, nb-1); |
2616 | 75.0k | if (nb <= 0) |
2617 | 0 | return nb; |
2618 | | |
2619 | | /* Merge what remains of the runs, using a temp array with |
2620 | | * min(na, nb) elements. |
2621 | | */ |
2622 | 75.0k | if (na <= nb) |
2623 | 50.3k | return merge_lo(ms, ssa, na, ssb, nb); |
2624 | 24.6k | else |
2625 | 24.6k | return merge_hi(ms, ssa, na, ssb, nb); |
2626 | 75.0k | } |
2627 | | |
2628 | | /* Two adjacent runs begin at index s1. The first run has length n1, and |
2629 | | * the second run (starting at index s1+n1) has length n2. The list has total |
2630 | | * length n. |
2631 | | * Compute the "power" of the first run. See listsort.txt for details. |
2632 | | */ |
2633 | | static int |
2634 | | powerloop(Py_ssize_t s1, Py_ssize_t n1, Py_ssize_t n2, Py_ssize_t n) |
2635 | 75.3k | { |
2636 | 75.3k | int result = 0; |
2637 | 75.3k | assert(s1 >= 0); |
2638 | 75.3k | assert(n1 > 0 && n2 > 0); |
2639 | 75.3k | assert(s1 + n1 + n2 <= n); |
2640 | | /* midpoints a and b: |
2641 | | * a = s1 + n1/2 |
2642 | | * b = s1 + n1 + n2/2 = a + (n1 + n2)/2 |
2643 | | * |
2644 | | * Those may not be integers, though, because of the "/2". So we work with |
2645 | | * 2*a and 2*b instead, which are necessarily integers. It makes no |
2646 | | * difference to the outcome, since the bits in the expansion of (2*i)/n |
2647 | | * are merely shifted one position from those of i/n. |
2648 | | */ |
2649 | 75.3k | Py_ssize_t a = 2 * s1 + n1; /* 2*a */ |
2650 | 75.3k | Py_ssize_t b = a + n1 + n2; /* 2*b */ |
2651 | | /* Emulate a/n and b/n one bit a time, until bits differ. */ |
2652 | 263k | for (;;) { |
2653 | 263k | ++result; |
2654 | 263k | if (a >= n) { /* both quotient bits are 1 */ |
2655 | 97.6k | assert(b >= a); |
2656 | 97.6k | a -= n; |
2657 | 97.6k | b -= n; |
2658 | 97.6k | } |
2659 | 166k | else if (b >= n) { /* a/n bit is 0, b/n bit is 1 */ |
2660 | 75.3k | break; |
2661 | 75.3k | } /* else both quotient bits are 0 */ |
2662 | 263k | assert(a < b && b < n); |
2663 | 188k | a <<= 1; |
2664 | 188k | b <<= 1; |
2665 | 188k | } |
2666 | 75.3k | return result; |
2667 | 75.3k | } |
2668 | | |
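/* Standalone worked instance (not part of listobject.c) of the power
 * computation above, using the same bit-by-bit loop on plain C types.  For a
 * list of n = 8 elements with a run of n1 = 2 starting at s1 = 0 followed by a
 * run of n2 = 2: 2*a = 2 and 2*b = 6; after one doubling a = 4 is still < 8
 * but b = 12 >= 8, so the quotient bits of a/n and b/n first differ at the
 * second bit and the power is 2.  The demo_* name is illustrative only. */
#include <assert.h>
#include <stdio.h>

static int
demo_powerloop(long s1, long n1, long n2, long n)
{
    int result = 0;
    long a = 2 * s1 + n1;                    /* 2 * midpoint of the first run */
    long b = a + n1 + n2;                    /* 2 * midpoint of the second run */
    for (;;) {
        ++result;
        if (a >= n) {                        /* both quotient bits are 1 */
            a -= n;
            b -= n;
        }
        else if (b >= n) {                   /* bits differ: 0 vs 1, so we're done */
            break;
        }
        a <<= 1;
        b <<= 1;
    }
    return result;
}

int main(void)
{
    assert(demo_powerloop(0, 2, 2, 8) == 2);
    printf("power = %d\n", demo_powerloop(0, 2, 2, 8));
    return 0;
}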
2669 | | /* The next run has been identified, of length n2. |
2670 | | * If there's already a run on the stack, apply the "powersort" merge strategy: |
2671 | | * compute the topmost run's "power" (depth in a conceptual binary merge tree) |
2672 | | * and merge adjacent runs on the stack with greater power. See listsort.txt |
2673 | | * for more info. |
2674 | | * |
2675 | | * It's the caller's responsibility to push the new run on the stack when this |
2676 | | * returns. |
2677 | | * |
2678 | | * Returns 0 on success, -1 on error. |
2679 | | */ |
2680 | | static int |
2681 | | found_new_run(MergeState *ms, Py_ssize_t n2) |
2682 | 296k | { |
2683 | 296k | assert(ms); |
2684 | 296k | if (ms->n) { |
2685 | 75.3k | assert(ms->n > 0); |
2686 | 75.3k | struct s_slice *p = ms->pending; |
2687 | 75.3k | Py_ssize_t s1 = p[ms->n - 1].base.keys - ms->basekeys; /* start index */ |
2688 | 75.3k | Py_ssize_t n1 = p[ms->n - 1].len; |
2689 | 75.3k | int power = powerloop(s1, n1, n2, ms->listlen); |
2690 | 118k | while (ms->n > 1 && p[ms->n - 2].power > power) { |
2691 | 43.4k | if (merge_at(ms, ms->n - 2) < 0) |
2692 | 0 | return -1; |
2693 | 43.4k | } |
2694 | 75.3k | assert(ms->n < 2 || p[ms->n - 2].power < power); |
2695 | 75.3k | p[ms->n - 1].power = power; |
2696 | 75.3k | } |
2697 | 296k | return 0; |
2698 | 296k | } |
2699 | | |
2700 | | /* Regardless of invariants, merge all runs on the stack until only one |
2701 | | * remains. This is used at the end of the mergesort. |
2702 | | * |
2703 | | * Returns 0 on success, -1 on error. |
2704 | | */ |
2705 | | static int |
2706 | | merge_force_collapse(MergeState *ms) |
2707 | 221k | { |
2708 | 221k | struct s_slice *p = ms->pending; |
2709 | | |
2710 | 221k | assert(ms); |
2711 | 253k | while (ms->n > 1) { |
2712 | 31.8k | Py_ssize_t n = ms->n - 2; |
2713 | 31.8k | if (n > 0 && p[n-1].len < p[n+1].len) |
2714 | 551 | --n; |
2715 | 31.8k | if (merge_at(ms, n) < 0) |
2716 | 0 | return -1; |
2717 | 31.8k | } |
2718 | 221k | return 0; |
2719 | 221k | } |
2720 | | |
2721 | | /* Return the next minrun value to use. See listsort.txt. */ |
2722 | | Py_LOCAL_INLINE(Py_ssize_t) |
2723 | | minrun_next(MergeState *ms) |
2724 | 296k | { |
2725 | 296k | ms->mr_current += ms->listlen; |
2726 | 296k | assert(ms->mr_current >= 0); /* no overflow */ |
2727 | 296k | Py_ssize_t result = ms->mr_current >> ms->mr_e; |
2728 | 296k | ms->mr_current &= ms->mr_mask; |
2729 | 296k | return result; |
2730 | 296k | } |
2731 | | |
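/* Standalone sketch (not part of listobject.c) of the minrun generator:
 * merge_init() picks e so that listlen >> e drops below MAX_MINRUN, and
 * minrun_next() then emits listlen / 2**e with the remainder spread evenly,
 * so the minruns sum exactly to listlen.  For listlen = 150, e = 2 and the
 * sequence is 37, 38, 37, 38 (sum 150).  The DEMO_* name is illustrative
 * only. */
#include <stdio.h>

#define DEMO_MAX_MINRUN 64

int main(void)
{
    long listlen = 150;

    int e = 0;
    while ((listlen >> e) >= DEMO_MAX_MINRUN) {
        ++e;                                 /* listlen = 150 gives e = 2 */
    }
    long mask = (1L << e) - 1;

    long current = 0;
    for (int i = 0; i < 4; i++) {
        current += listlen;
        long minrun = current >> e;          /* integer part of listlen / 2**e */
        current &= mask;                     /* carry the fractional part forward */
        printf("minrun #%d = %ld\n", i, minrun);   /* prints 37, 38, 37, 38 */
    }
    return 0;
}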
2732 | | /* Here we define custom comparison functions to optimize for the cases one commonly |
2733 | | * encounters in practice: homogeneous lists, often of one of the basic types. */ |
2734 | | |
2735 | | /* This struct holds the comparison function and helper functions |
2736 | | * selected in the pre-sort check. */ |
2737 | | |
2738 | | /* These are the special case compare functions. |
2739 | | * ms->key_compare will always point to one of these: */ |
2740 | | |
2741 | | /* Heterogeneous compare: default, always safe to fall back on. */ |
2742 | | static int |
2743 | | safe_object_compare(PyObject *v, PyObject *w, MergeState *ms) |
2744 | 0 | { |
2745 | | /* No assumptions necessary! */ |
2746 | 0 | return PyObject_RichCompareBool(v, w, Py_LT); |
2747 | 0 | } |
2748 | | |
2749 | | /* Homogeneous compare: safe for any two comparable objects of the same type. |
2750 | | * (ms->key_richcompare is set to ob_type->tp_richcompare in the |
2751 | | * pre-sort check.) |
2752 | | */ |
2753 | | static int |
2754 | | unsafe_object_compare(PyObject *v, PyObject *w, MergeState *ms) |
2755 | 9.03M | { |
2756 | 9.03M | PyObject *res_obj; int res; |
2757 | | |
2758 | | /* No assumptions, because we check first: */ |
2759 | 9.03M | if (Py_TYPE(v)->tp_richcompare != ms->key_richcompare) |
2760 | 0 | return PyObject_RichCompareBool(v, w, Py_LT); |
2761 | | |
2762 | 9.03M | assert(ms->key_richcompare != NULL); |
2763 | 9.03M | res_obj = (*(ms->key_richcompare))(v, w, Py_LT); |
2764 | | |
2765 | 9.03M | if (res_obj == Py_NotImplemented) { |
2766 | 0 | Py_DECREF(res_obj); |
2767 | 0 | return PyObject_RichCompareBool(v, w, Py_LT); |
2768 | 0 | } |
2769 | 9.03M | if (res_obj == NULL) |
2770 | 0 | return -1; |
2771 | | |
2772 | 9.03M | if (PyBool_Check(res_obj)) { |
2773 | 9.03M | res = (res_obj == Py_True); |
2774 | 9.03M | } |
2775 | 0 | else { |
2776 | 0 | res = PyObject_IsTrue(res_obj); |
2777 | 0 | } |
2778 | 9.03M | Py_DECREF(res_obj); |
2779 | | |
2780 | | /* Note that we can't assert |
2781 | | * res == PyObject_RichCompareBool(v, w, Py_LT); |
2782 | | * because of evil compare functions like this: |
2783 | | * lambda a, b: int(random.random() * 3) - 1) |
2784 | | * (which is actually in test_sort.py) */ |
2785 | 9.03M | return res; |
2786 | 9.03M | } |
2787 | | |
2788 | | /* Latin string compare: safe for any two latin (one byte per char) strings. */ |
2789 | | static int |
2790 | | unsafe_latin_compare(PyObject *v, PyObject *w, MergeState *ms) |
2791 | 3.43M | { |
2792 | 3.43M | Py_ssize_t len; |
2793 | 3.43M | int res; |
2794 | | |
2795 | | /* Modified from Objects/unicodeobject.c:unicode_compare, assuming: */ |
2796 | 3.43M | assert(Py_IS_TYPE(v, &PyUnicode_Type)); |
2797 | 3.43M | assert(Py_IS_TYPE(w, &PyUnicode_Type)); |
2798 | 3.43M | assert(PyUnicode_KIND(v) == PyUnicode_KIND(w)); |
2799 | 3.43M | assert(PyUnicode_KIND(v) == PyUnicode_1BYTE_KIND); |
2800 | | |
2801 | 3.43M | len = Py_MIN(PyUnicode_GET_LENGTH(v), PyUnicode_GET_LENGTH(w)); |
2802 | 3.43M | res = memcmp(PyUnicode_DATA(v), PyUnicode_DATA(w), len); |
2803 | | |
2804 | 3.43M | res = (res != 0 ? |
2805 | 3.35M | res < 0 : |
2806 | 3.43M | PyUnicode_GET_LENGTH(v) < PyUnicode_GET_LENGTH(w)); |
2807 | | |
2808 | 3.43M |     assert(res == PyObject_RichCompareBool(v, w, Py_LT));
2809 | 3.43M | return res; |
2810 | 3.43M | } |
2811 | | |
2812 | | /* Bounded int compare: compare any two longs that fit in a single machine word. */ |
2813 | | static int |
2814 | | unsafe_long_compare(PyObject *v, PyObject *w, MergeState *ms) |
2815 | 6.08M | { |
2816 | 6.08M | PyLongObject *vl, *wl; |
2817 | 6.08M | intptr_t v0, w0; |
2818 | 6.08M | int res; |
2819 | | |
2820 | | /* Modified from Objects/longobject.c:long_compare, assuming: */ |
2821 | 6.08M | assert(Py_IS_TYPE(v, &PyLong_Type)); |
2822 | 6.08M | assert(Py_IS_TYPE(w, &PyLong_Type)); |
2823 | 6.08M | assert(_PyLong_IsCompact((PyLongObject *)v)); |
2824 | 6.08M | assert(_PyLong_IsCompact((PyLongObject *)w)); |
2825 | | |
2826 | 6.08M | vl = (PyLongObject*)v; |
2827 | 6.08M | wl = (PyLongObject*)w; |
2828 | | |
2829 | 6.08M | v0 = _PyLong_CompactValue(vl); |
2830 | 6.08M | w0 = _PyLong_CompactValue(wl); |
2831 | | |
2832 | 6.08M | res = v0 < w0; |
2833 | 6.08M | assert(res == PyObject_RichCompareBool(v, w, Py_LT)); |
2834 | 6.08M | return res; |
2835 | 6.08M | } |
2836 | | |
2837 | | /* Float compare: compare any two floats. */ |
2838 | | static int |
2839 | | unsafe_float_compare(PyObject *v, PyObject *w, MergeState *ms) |
2840 | 0 | { |
2841 | 0 | int res; |
2842 | | |
2843 | | /* Modified from Objects/floatobject.c:float_richcompare, assuming: */ |
2844 | 0 | assert(Py_IS_TYPE(v, &PyFloat_Type)); |
2845 | 0 | assert(Py_IS_TYPE(w, &PyFloat_Type)); |
2846 | |
2847 | 0 | res = PyFloat_AS_DOUBLE(v) < PyFloat_AS_DOUBLE(w); |
2848 | 0 | assert(res == PyObject_RichCompareBool(v, w, Py_LT)); |
2849 | 0 | return res; |
2850 | 0 | } |
2851 | | |
2852 | | /* Tuple compare: compare *any* two tuples, using |
2853 | | * ms->tuple_elem_compare to compare the first elements, which is set |
2854 | | * using the same pre-sort check as we use for ms->key_compare, |
2855 | | * but run on the list [x[0] for x in L]. This allows us to optimize compares |
2856 | | * on two levels (as long as [x[0] for x in L] is type-homogeneous.) The idea is |
2857 | | * that most tuple compares don't involve x[1:]. */ |
2858 | | static int |
2859 | | unsafe_tuple_compare(PyObject *v, PyObject *w, MergeState *ms) |
2860 | 1.40k | { |
2861 | 1.40k | PyTupleObject *vt, *wt; |
2862 | 1.40k | Py_ssize_t i, vlen, wlen; |
2863 | 1.40k | int k; |
2864 | | |
2865 | | /* Modified from Objects/tupleobject.c:tuplerichcompare, assuming: */ |
2866 | 1.40k | assert(Py_IS_TYPE(v, &PyTuple_Type)); |
2867 | 1.40k | assert(Py_IS_TYPE(w, &PyTuple_Type)); |
2868 | 1.40k | assert(Py_SIZE(v) > 0); |
2869 | 1.40k | assert(Py_SIZE(w) > 0); |
2870 | | |
2871 | 1.40k | vt = (PyTupleObject *)v; |
2872 | 1.40k | wt = (PyTupleObject *)w; |
2873 | | |
2874 | 1.40k | vlen = Py_SIZE(vt); |
2875 | 1.40k | wlen = Py_SIZE(wt); |
2876 | | |
2877 | 1.42k | for (i = 0; i < vlen && i < wlen; i++) { |
2878 | 1.42k | k = PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_EQ); |
2879 | 1.42k | if (k < 0) |
2880 | 0 | return -1; |
2881 | 1.42k | if (!k) |
2882 | 1.40k | break; |
2883 | 1.42k | } |
2884 | | |
2885 | 1.40k | if (i >= vlen || i >= wlen) |
2886 | 0 | return vlen < wlen; |
2887 | | |
2888 | 1.40k | if (i == 0) |
2889 | 1.39k | return ms->tuple_elem_compare(vt->ob_item[i], wt->ob_item[i], ms); |
2890 | 12 | else |
2891 | 12 | return PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_LT); |
2892 | 1.40k | } |
2893 | | |
2894 | | /* An adaptive, stable, natural mergesort. See listsort.txt. |
2895 | | * Returns Py_None on success, NULL on error. Even in case of error, the |
2896 | | * list will be some permutation of its input state (nothing is lost or |
2897 | | * duplicated). |
2898 | | */ |
2899 | | /*[clinic input] |
2900 | | @permit_long_docstring_body |
2901 | | @critical_section |
2902 | | list.sort |
2903 | | |
2904 | | * |
2905 | | key as keyfunc: object = None |
2906 | | reverse: bool = False |
2907 | | |
2908 | | Sort the list in ascending order and return None. |
2909 | | |
2910 | | The sort is in-place (i.e. the list itself is modified) and stable (i.e. the |
2911 | | order of two equal elements is maintained). |
2912 | | |
2913 | | If a key function is given, apply it once to each list item and sort them, |
2914 | | ascending or descending, according to their function values. |
2915 | | |
2916 | | The reverse flag can be set to sort in descending order. |
2917 | | [clinic start generated code]*/ |
2918 | | |
2919 | | static PyObject * |
2920 | | list_sort_impl(PyListObject *self, PyObject *keyfunc, int reverse) |
2921 | | /*[clinic end generated code: output=57b9f9c5e23fbe42 input=e4f6b6069181ad7d]*/ |
2922 | 871k | { |
2923 | 871k | MergeState ms; |
2924 | 871k | Py_ssize_t nremaining; |
2925 | 871k | Py_ssize_t minrun; |
2926 | 871k | sortslice lo; |
2927 | 871k | Py_ssize_t saved_ob_size, saved_allocated; |
2928 | 871k | PyObject **saved_ob_item; |
2929 | 871k | PyObject **final_ob_item; |
2930 | 871k | PyObject *result = NULL; /* guilty until proved innocent */ |
2931 | 871k | Py_ssize_t i; |
2932 | 871k | PyObject **keys; |
2933 | | |
2934 | 871k | assert(self != NULL); |
2935 | 871k | assert(PyList_Check(self)); |
2936 | 871k | if (keyfunc == Py_None) |
2937 | 214k | keyfunc = NULL; |
2938 | | |
2939 | | /* The list is temporarily made empty, so that mutations performed |
2940 | | * by comparison functions can't affect the slice of memory we're |
2941 | | * sorting (allowing mutations during sorting is a core-dump |
2942 | | * factory, since ob_item may change). |
2943 | | */ |
2944 | 871k | saved_ob_size = Py_SIZE(self); |
2945 | 871k | saved_ob_item = self->ob_item; |
2946 | 871k | saved_allocated = self->allocated; |
2947 | 871k | Py_SET_SIZE(self, 0); |
2948 | 871k | FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item, NULL); |
2949 | 871k | self->allocated = -1; /* any operation will reset it to >= 0 */ |
2950 | | |
2951 | 871k | if (keyfunc == NULL) { |
2952 | 239k | keys = NULL; |
2953 | 239k | lo.keys = saved_ob_item; |
2954 | 239k | lo.values = NULL; |
2955 | 239k | } |
2956 | 631k | else { |
2957 | 631k | if (saved_ob_size < MERGESTATE_TEMP_SIZE/2) |
2958 | | /* Leverage stack space we allocated but won't otherwise use */ |
2959 | 626k | keys = &ms.temparray[saved_ob_size+1]; |
2960 | 4.22k | else { |
2961 | 4.22k | keys = PyMem_Malloc(sizeof(PyObject *) * saved_ob_size); |
2962 | 4.22k | if (keys == NULL) { |
2963 | 0 | PyErr_NoMemory(); |
2964 | 0 | goto keyfunc_fail; |
2965 | 0 | } |
2966 | 4.22k | } |
2967 | | |
2968 | 5.44M | for (i = 0; i < saved_ob_size ; i++) { |
2969 | 4.81M | keys[i] = PyObject_CallOneArg(keyfunc, saved_ob_item[i]); |
2970 | 4.81M | if (keys[i] == NULL) { |
2971 | 0 | for (i=i-1 ; i>=0 ; i--) |
2972 | 0 | Py_DECREF(keys[i]); |
2973 | 0 | if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2) |
2974 | 0 | PyMem_Free(keys); |
2975 | 0 | goto keyfunc_fail; |
2976 | 0 | } |
2977 | 4.81M | } |
2978 | | |
2979 | 631k | lo.keys = keys; |
2980 | 631k | lo.values = saved_ob_item; |
2981 | 631k | } |
2982 | | |
2983 | | |
2984 | | /* The pre-sort check: here's where we decide which compare function to use. |
2985 | | * How much optimization is safe? We test for homogeneity with respect to |
2986 | | * several properties that are expensive to check at compare-time, and |
2987 | | * set ms appropriately. */ |
2988 | 871k | if (saved_ob_size > 1) { |
2989 | | /* Assume the first element is representative of the whole list. */ |
2990 | 221k | int keys_are_in_tuples = (Py_IS_TYPE(lo.keys[0], &PyTuple_Type) && |
2991 | 82 | Py_SIZE(lo.keys[0]) > 0); |
2992 | | |
2993 | 221k | PyTypeObject* key_type = (keys_are_in_tuples ? |
2994 | 82 | Py_TYPE(PyTuple_GET_ITEM(lo.keys[0], 0)) : |
2995 | 221k | Py_TYPE(lo.keys[0])); |
2996 | | |
2997 | 221k | int keys_are_all_same_type = 1; |
2998 | 221k | int strings_are_latin = 1; |
2999 | 221k | int ints_are_bounded = 1; |
3000 | | |
3001 | | /* Prove that assumption by checking every key. */ |
3002 | 6.39M | for (i=0; i < saved_ob_size; i++) { |
3003 | | |
3004 | 6.17M | if (keys_are_in_tuples && |
3005 | 671 | !(Py_IS_TYPE(lo.keys[i], &PyTuple_Type) && Py_SIZE(lo.keys[i]) != 0)) { |
3006 | 0 | keys_are_in_tuples = 0; |
3007 | 0 | keys_are_all_same_type = 0; |
3008 | 0 | break; |
3009 | 0 | } |
3010 | | |
3011 | | /* Note: for lists of tuples, key is the first element of the tuple |
3012 | | * lo.keys[i], not lo.keys[i] itself! We verify type-homogeneity |
3013 | | * for lists of tuples in the if-statement directly above. */ |
3014 | 6.17M | PyObject *key = (keys_are_in_tuples ? |
3015 | 671 | PyTuple_GET_ITEM(lo.keys[i], 0) : |
3016 | 6.17M | lo.keys[i]); |
3017 | | |
3018 | 6.17M | if (!Py_IS_TYPE(key, key_type)) { |
3019 | 0 | keys_are_all_same_type = 0; |
3020 | | /* If keys are in tuple we must loop over the whole list to make |
3021 | | sure all items are tuples */ |
3022 | 0 | if (!keys_are_in_tuples) { |
3023 | 0 | break; |
3024 | 0 | } |
3025 | 0 | } |
3026 | | |
3027 | 6.17M | if (keys_are_all_same_type) { |
3028 | 6.17M | if (key_type == &PyLong_Type && |
3029 | 4.23M | ints_are_bounded && |
3030 | 2.56M | !_PyLong_IsCompact((PyLongObject *)key)) { |
3031 | | |
3032 | 8.05k | ints_are_bounded = 0; |
3033 | 8.05k | } |
3034 | 6.16M | else if (key_type == &PyUnicode_Type && |
3035 | 774k | strings_are_latin && |
3036 | 703k | PyUnicode_KIND(key) != PyUnicode_1BYTE_KIND) { |
3037 | | |
3038 | 5.88k | strings_are_latin = 0; |
3039 | 5.88k | } |
3040 | 6.17M | } |
3041 | 6.17M | } |
3042 | | |
3043 | | /* Choose the best compare, given what we now know about the keys. */ |
3044 | 221k | if (keys_are_all_same_type) { |
3045 | | |
3046 | 221k | if (key_type == &PyUnicode_Type && strings_are_latin) { |
3047 | 21.8k | ms.key_compare = unsafe_latin_compare; |
3048 | 21.8k | } |
3049 | 199k | else if (key_type == &PyLong_Type && ints_are_bounded) { |
3050 | 86.9k | ms.key_compare = unsafe_long_compare; |
3051 | 86.9k | } |
3052 | 112k | else if (key_type == &PyFloat_Type) { |
3053 | 0 | ms.key_compare = unsafe_float_compare; |
3054 | 0 | } |
3055 | 112k | else if ((ms.key_richcompare = key_type->tp_richcompare) != NULL) { |
3056 | 112k | ms.key_compare = unsafe_object_compare; |
3057 | 112k | } |
3058 | 0 | else { |
3059 | 0 | ms.key_compare = safe_object_compare; |
3060 | 0 | } |
3061 | 221k | } |
3062 | 0 | else { |
3063 | 0 | ms.key_compare = safe_object_compare; |
3064 | 0 | } |
3065 | | |
3066 | 221k | if (keys_are_in_tuples) { |
3067 | | /* Make sure we're not dealing with tuples of tuples |
3068 | |              * (remember: here, key_type refers to the list [key[0] for key in keys]) */
3069 | 82 | if (key_type == &PyTuple_Type) { |
3070 | 0 | ms.tuple_elem_compare = safe_object_compare; |
3071 | 0 | } |
3072 | 82 | else { |
3073 | 82 | ms.tuple_elem_compare = ms.key_compare; |
3074 | 82 | } |
3075 | | |
3076 | 82 | ms.key_compare = unsafe_tuple_compare; |
3077 | 82 | } |
3078 | 221k | } |
3079 | | /* End of pre-sort check: ms is now set properly! */ |
3080 | | |
3081 | 871k | merge_init(&ms, saved_ob_size, keys != NULL, &lo); |
3082 | | |
3083 | 871k | nremaining = saved_ob_size; |
3084 | 871k | if (nremaining < 2) |
3085 | 649k | goto succeed; |
3086 | | |
3087 | |     /* Reverse sort stability is achieved by initially reversing the list,
3088 | | applying a stable forward sort, then reversing the final result. */ |
3089 | 221k | if (reverse) { |
3090 | 4 | if (keys != NULL) |
3091 | 0 | reverse_slice(&keys[0], &keys[saved_ob_size]); |
3092 | 4 | reverse_slice(&saved_ob_item[0], &saved_ob_item[saved_ob_size]); |
3093 | 4 | } |
3094 | | |
3095 | | /* March over the array once, left to right, finding natural runs, |
3096 | | * and extending short natural runs to minrun elements. |
3097 | | */ |
3098 | 296k | do { |
3099 | 296k | Py_ssize_t n; |
3100 | | |
3101 | | /* Identify next run. */ |
3102 | 296k | n = count_run(&ms, &lo, nremaining); |
3103 | 296k | if (n < 0) |
3104 | 0 | goto fail; |
3105 | | /* If short, extend to min(minrun, nremaining). */ |
3106 | 296k | minrun = minrun_next(&ms); |
3107 | 296k | if (n < minrun) { |
3108 | 141k | const Py_ssize_t force = nremaining <= minrun ? |
3109 | 86.3k | nremaining : minrun; |
3110 | 141k | if (binarysort(&ms, &lo, force, n) < 0) |
3111 | 0 | goto fail; |
3112 | 141k | n = force; |
3113 | 141k | } |
3114 | | /* Maybe merge pending runs. */ |
3115 | 296k | assert(ms.n == 0 || ms.pending[ms.n -1].base.keys + |
3116 | 296k | ms.pending[ms.n-1].len == lo.keys); |
3117 | 296k | if (found_new_run(&ms, n) < 0) |
3118 | 0 | goto fail; |
3119 | | /* Push new run on stack. */ |
3120 | 296k | assert(ms.n < MAX_MERGE_PENDING); |
3121 | 296k | ms.pending[ms.n].base = lo; |
3122 | 296k | ms.pending[ms.n].len = n; |
3123 | 296k | ++ms.n; |
3124 | | /* Advance to find next run. */ |
3125 | 296k | sortslice_advance(&lo, n); |
3126 | 296k | nremaining -= n; |
3127 | 296k | } while (nremaining); |
3128 | | |
3129 | 221k | if (merge_force_collapse(&ms) < 0) |
3130 | 0 | goto fail; |
3131 | 221k | assert(ms.n == 1); |
3132 | 221k | assert(keys == NULL |
3133 | 221k | ? ms.pending[0].base.keys == saved_ob_item |
3134 | 221k | : ms.pending[0].base.keys == &keys[0]); |
3135 | 221k | assert(ms.pending[0].len == saved_ob_size); |
3136 | 221k | lo = ms.pending[0].base; |
3137 | | |
3138 | 871k | succeed: |
3139 | 871k | result = Py_None; |
3140 | 871k | fail: |
3141 | 871k | if (keys != NULL) { |
3142 | 5.44M | for (i = 0; i < saved_ob_size; i++) |
3143 | 4.81M | Py_DECREF(keys[i]); |
3144 | 631k | if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2) |
3145 | 4.22k | PyMem_Free(keys); |
3146 | 631k | } |
3147 | | |
3148 | 871k | if (self->allocated != -1 && result != NULL) { |
3149 | | /* The user mucked with the list during the sort, |
3150 | | * and we don't already have another error to report. |
3151 | | */ |
3152 | 0 | PyErr_SetString(PyExc_ValueError, "list modified during sort"); |
3153 | 0 | result = NULL; |
3154 | 0 | } |
3155 | | |
3156 | 871k | if (reverse && saved_ob_size > 1) |
3157 | 4 | reverse_slice(saved_ob_item, saved_ob_item + saved_ob_size); |
3158 | | |
3159 | 871k | merge_freemem(&ms); |
3160 | | |
3161 | 871k | keyfunc_fail: |
3162 | 871k | final_ob_item = self->ob_item; |
3163 | 871k | i = Py_SIZE(self); |
3164 | 871k | Py_SET_SIZE(self, saved_ob_size); |
3165 | 871k | FT_ATOMIC_STORE_PTR_RELEASE(self->ob_item, saved_ob_item); |
3166 | 871k | FT_ATOMIC_STORE_SSIZE_RELAXED(self->allocated, saved_allocated); |
3167 | 871k | if (final_ob_item != NULL) { |
3168 | | /* we cannot use list_clear() for this because it does not |
3169 | | guarantee that the list is really empty when it returns */ |
3170 | 0 | while (--i >= 0) { |
3171 | 0 | Py_XDECREF(final_ob_item[i]); |
3172 | 0 | } |
3173 | | #ifdef Py_GIL_DISABLED |
3174 | | ensure_shared_on_resize(self); |
3175 | | bool use_qsbr = _PyObject_GC_IS_SHARED(self); |
3176 | | #else |
3177 | 0 | bool use_qsbr = false; |
3178 | 0 | #endif |
3179 | 0 | free_list_items(final_ob_item, use_qsbr); |
3180 | 0 | } |
3181 | 871k | return Py_XNewRef(result); |
3182 | 871k | } |
3183 | | #undef IFLT |
3184 | | #undef ISLT |
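/* Illustrative sketch (hypothetical helper, error handling abbreviated):
 * driving the sort above from C with the keyword options described in the
 * docstring.  Because reverse stability is obtained by reversing,
 * forward-sorting, then reversing again, equal elements keep their original
 * relative order even with reverse=True. */
static int
example_sort_descending(PyObject *list)
{
    PyObject *sort = PyObject_GetAttrString(list, "sort");
    if (sort == NULL) {
        return -1;
    }
    PyObject *args = PyTuple_New(0);
    PyObject *kwargs = args ? Py_BuildValue("{s:i}", "reverse", 1) : NULL;
    /* Equivalent of: list.sort(reverse=True) */
    PyObject *res = kwargs ? PyObject_Call(sort, args, kwargs) : NULL;
    Py_XDECREF(kwargs);
    Py_XDECREF(args);
    Py_DECREF(sort);
    if (res == NULL) {
        return -1;
    }
    Py_DECREF(res);
    return 0;
}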
3185 | | |
3186 | | int |
3187 | | PyList_Sort(PyObject *v) |
3188 | 25.8k | { |
3189 | 25.8k | if (v == NULL || !PyList_Check(v)) { |
3190 | 0 | PyErr_BadInternalCall(); |
3191 | 0 | return -1; |
3192 | 0 | } |
3193 | 25.8k | Py_BEGIN_CRITICAL_SECTION(v); |
3194 | 25.8k | v = list_sort_impl((PyListObject *)v, NULL, 0); |
3195 | 25.8k | Py_END_CRITICAL_SECTION(); |
3196 | 25.8k | if (v == NULL) |
3197 | 0 | return -1; |
3198 | 25.8k | Py_DECREF(v); |
3199 | 25.8k | return 0; |
3200 | 25.8k | } |
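/* Minimal usage sketch (hypothetical helper): PyList_Sort() sorts in place
 * and reports failure -- e.g. uncomparable elements -- by returning -1 with
 * an exception set. */
static int
example_build_and_sort(void)
{
    PyObject *nums = Py_BuildValue("[iii]", 3, 1, 2);
    if (nums == NULL) {
        return -1;
    }
    int rc = PyList_Sort(nums);   /* nums is [1, 2, 3] on success */
    Py_DECREF(nums);
    return rc;
}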
3201 | | |
3202 | | /*[clinic input] |
3203 | | @critical_section |
3204 | | list.reverse |
3205 | | |
3206 | | Reverse *IN PLACE*. |
3207 | | [clinic start generated code]*/ |
3208 | | |
3209 | | static PyObject * |
3210 | | list_reverse_impl(PyListObject *self) |
3211 | | /*[clinic end generated code: output=482544fc451abea9 input=04ac8e0c6a66e4d9]*/ |
3212 | 0 | { |
3213 | 0 | if (Py_SIZE(self) > 1) |
3214 | 0 | reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self)); |
3215 | 0 | Py_RETURN_NONE; |
3216 | 0 | } |
3217 | | |
3218 | | int |
3219 | | PyList_Reverse(PyObject *v) |
3220 | 50 | { |
3221 | 50 | PyListObject *self = (PyListObject *)v; |
3222 | | |
3223 | 50 | if (v == NULL || !PyList_Check(v)) { |
3224 | 0 | PyErr_BadInternalCall(); |
3225 | 0 | return -1; |
3226 | 0 | } |
3227 | 50 | Py_BEGIN_CRITICAL_SECTION(self); |
3228 | 50 | if (Py_SIZE(self) > 1) { |
3229 | 50 | reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self)); |
3230 | 50 | } |
3231 | 50 | Py_END_CRITICAL_SECTION() |
3232 | 50 | return 0; |
3233 | 50 | } |
3234 | | |
3235 | | PyObject * |
3236 | | PyList_AsTuple(PyObject *v) |
3237 | 263k | { |
3238 | 263k | if (v == NULL || !PyList_Check(v)) { |
3239 | 0 | PyErr_BadInternalCall(); |
3240 | 0 | return NULL; |
3241 | 0 | } |
3242 | 263k | PyObject *ret; |
3243 | 263k | PyListObject *self = (PyListObject *)v; |
3244 | 263k | Py_BEGIN_CRITICAL_SECTION(self); |
3245 | 263k | ret = PyTuple_FromArray(self->ob_item, Py_SIZE(v)); |
3246 | 263k | Py_END_CRITICAL_SECTION(); |
3247 | 263k | return ret; |
3248 | 263k | } |
3249 | | |
3250 | | PyObject * |
3251 | | _PyList_AsTupleAndClear(PyListObject *self) |
3252 | 46 | { |
3253 | 46 | assert(self != NULL); |
3254 | 46 | PyObject *ret; |
3255 | 46 | if (self->ob_item == NULL) { |
3256 | 0 | return PyTuple_New(0); |
3257 | 0 | } |
3258 | 46 | Py_BEGIN_CRITICAL_SECTION(self); |
3259 | 46 | PyObject **items = self->ob_item; |
3260 | 46 | Py_ssize_t size = Py_SIZE(self); |
3261 | 46 | self->ob_item = NULL; |
3262 | 46 | Py_SET_SIZE(self, 0); |
3263 | 46 | ret = _PyTuple_FromArraySteal(items, size); |
3264 | 46 | free_list_items(items, false); |
3265 | 46 | Py_END_CRITICAL_SECTION(); |
3266 | 46 | return ret; |
3267 | 46 | } |
3268 | | |
3269 | | PyObject * |
3270 | | _PyList_FromStackRefStealOnSuccess(const _PyStackRef *src, Py_ssize_t n) |
3271 | 200M | { |
3272 | 200M | if (n == 0) { |
3273 | 179M | return PyList_New(0); |
3274 | 179M | } |
3275 | | |
3276 | 20.7M | PyListObject *list = (PyListObject *)PyList_New(n); |
3277 | 20.7M | if (list == NULL) { |
3278 | 0 | return NULL; |
3279 | 0 | } |
3280 | | |
3281 | 20.7M | PyObject **dst = list->ob_item; |
3282 | 54.7M | for (Py_ssize_t i = 0; i < n; i++) { |
3283 | 33.9M | dst[i] = PyStackRef_AsPyObjectSteal(src[i]); |
3284 | 33.9M | } |
3285 | | |
3286 | 20.7M | return (PyObject *)list; |
3287 | 20.7M | } |
3288 | | |
3289 | | /*[clinic input] |
3290 | | list.index |
3291 | | |
3292 | | value: object |
3293 | | start: slice_index(accept={int}) = 0 |
3294 | | stop: slice_index(accept={int}, c_default="PY_SSIZE_T_MAX") = sys.maxsize |
3295 | | / |
3296 | | |
3297 | | Return first index of value. |
3298 | | |
3299 | | Raises ValueError if the value is not present. |
3300 | | [clinic start generated code]*/ |
3301 | | |
3302 | | static PyObject * |
3303 | | list_index_impl(PyListObject *self, PyObject *value, Py_ssize_t start, |
3304 | | Py_ssize_t stop) |
3305 | | /*[clinic end generated code: output=ec51b88787e4e481 input=40ec5826303a0eb1]*/ |
3306 | 0 | { |
3307 | 0 | if (start < 0) { |
3308 | 0 | start += Py_SIZE(self); |
3309 | 0 | if (start < 0) |
3310 | 0 | start = 0; |
3311 | 0 | } |
3312 | 0 | if (stop < 0) { |
3313 | 0 | stop += Py_SIZE(self); |
3314 | 0 | if (stop < 0) |
3315 | 0 | stop = 0; |
3316 | 0 | } |
3317 | 0 | for (Py_ssize_t i = start; i < stop; i++) { |
3318 | 0 | PyObject *obj = list_get_item_ref(self, i); |
3319 | 0 | if (obj == NULL) { |
3320 | | // out-of-bounds |
3321 | 0 | break; |
3322 | 0 | } |
3323 | 0 | int cmp = PyObject_RichCompareBool(obj, value, Py_EQ); |
3324 | 0 | Py_DECREF(obj); |
3325 | 0 | if (cmp > 0) |
3326 | 0 | return PyLong_FromSsize_t(i); |
3327 | 0 | else if (cmp < 0) |
3328 | 0 | return NULL; |
3329 | 0 | } |
3330 | 0 | PyErr_SetString(PyExc_ValueError, "list.index(x): x not in list"); |
3331 | 0 | return NULL; |
3332 | 0 | } |
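/* Sketch (hypothetical helper): the negative start/stop handling above means
 * lst.index(x, -3) only searches the last three items; a miss raises
 * ValueError, which surfaces here as a NULL return from the call. */
static Py_ssize_t
example_index_in_tail(PyObject *lst, PyObject *x)
{
    PyObject *idx = PyObject_CallMethod(lst, "index", "On", x, (Py_ssize_t)-3);
    if (idx == NULL) {
        return -1;   /* not found (ValueError set) or another error occurred */
    }
    Py_ssize_t i = PyLong_AsSsize_t(idx);
    Py_DECREF(idx);
    return i;
}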
3333 | | |
3334 | | /*[clinic input] |
3335 | | list.count |
3336 | | |
3337 | | value: object |
3338 | | / |
3339 | | |
3340 | | Return number of occurrences of value. |
3341 | | [clinic start generated code]*/ |
3342 | | |
3343 | | static PyObject * |
3344 | | list_count_impl(PyListObject *self, PyObject *value) |
3345 | | /*[clinic end generated code: output=eff66f14aef2df86 input=3bdc3a5e6f749565]*/ |
3346 | 0 | { |
3347 | 0 | Py_ssize_t count = 0; |
3348 | 0 | for (Py_ssize_t i = 0; ; i++) { |
3349 | 0 | PyObject *obj = list_get_item_ref(self, i); |
3350 | 0 | if (obj == NULL) { |
3351 | | // out-of-bounds |
3352 | 0 | break; |
3353 | 0 | } |
3354 | 0 | if (obj == value) { |
3355 | 0 | count++; |
3356 | 0 | Py_DECREF(obj); |
3357 | 0 | continue; |
3358 | 0 | } |
3359 | 0 | int cmp = PyObject_RichCompareBool(obj, value, Py_EQ); |
3360 | 0 | Py_DECREF(obj); |
3361 | 0 | if (cmp > 0) |
3362 | 0 | count++; |
3363 | 0 | else if (cmp < 0) |
3364 | 0 | return NULL; |
3365 | 0 | } |
3366 | 0 | return PyLong_FromSsize_t(count); |
3367 | 0 | } |
3368 | | |
3369 | | /*[clinic input] |
3370 | | @critical_section |
3371 | | list.remove |
3372 | | |
3373 | | value: object |
3374 | | / |
3375 | | |
3376 | | Remove first occurrence of value. |
3377 | | |
3378 | | Raises ValueError if the value is not present. |
3379 | | [clinic start generated code]*/ |
3380 | | |
3381 | | static PyObject * |
3382 | | list_remove_impl(PyListObject *self, PyObject *value) |
3383 | | /*[clinic end generated code: output=b9b76a6633b18778 input=26c813dbb95aa93b]*/ |
3384 | 13.0k | { |
3385 | 13.0k | Py_ssize_t i; |
3386 | | |
3387 | 13.0k | for (i = 0; i < Py_SIZE(self); i++) { |
3388 | 13.0k | PyObject *obj = self->ob_item[i]; |
3389 | 13.0k | Py_INCREF(obj); |
3390 | 13.0k | int cmp = PyObject_RichCompareBool(obj, value, Py_EQ); |
3391 | 13.0k | Py_DECREF(obj); |
3392 | 13.0k | if (cmp > 0) { |
3393 | 13.0k | if (list_ass_slice_lock_held(self, i, i+1, NULL) == 0) |
3394 | 13.0k | Py_RETURN_NONE; |
3395 | 0 | return NULL; |
3396 | 13.0k | } |
3397 | 10 | else if (cmp < 0) |
3398 | 0 | return NULL; |
3399 | 13.0k | } |
3400 | 2 | PyErr_SetString(PyExc_ValueError, "list.remove(x): x not in list"); |
3401 | 2 | return NULL; |
3402 | 13.0k | } |
3403 | | |
3404 | | static int |
3405 | | list_traverse(PyObject *self, visitproc visit, void *arg) |
3406 | 56.5M | { |
3407 | 56.5M | PyListObject *o = (PyListObject *)self; |
3408 | 56.5M | Py_ssize_t i; |
3409 | | |
3410 | 189M | for (i = Py_SIZE(o); --i >= 0; ) |
3411 | 132M | Py_VISIT(o->ob_item[i]); |
3412 | 56.5M | return 0; |
3413 | 56.5M | } |
3414 | | |
3415 | | static PyObject * |
3416 | | list_richcompare_impl(PyObject *v, PyObject *w, int op) |
3417 | 218k | { |
3418 | 218k | PyListObject *vl, *wl; |
3419 | 218k | Py_ssize_t i; |
3420 | | |
3421 | 218k | if (!PyList_Check(v) || !PyList_Check(w)) |
3422 | 3.32k | Py_RETURN_NOTIMPLEMENTED; |
3423 | | |
3424 | 215k | vl = (PyListObject *)v; |
3425 | 215k | wl = (PyListObject *)w; |
3426 | | |
3427 | 215k | if (Py_SIZE(vl) != Py_SIZE(wl) && (op == Py_EQ || op == Py_NE)) { |
3428 | | /* Shortcut: if the lengths differ, the lists differ */ |
3429 | 106k | if (op == Py_EQ) |
3430 | 106k | Py_RETURN_FALSE; |
3431 | 0 | else |
3432 | 0 | Py_RETURN_TRUE; |
3433 | 106k | } |
3434 | | |
3435 | | /* Search for the first index where items are different */ |
3436 | 110k | for (i = 0; i < Py_SIZE(vl) && i < Py_SIZE(wl); i++) { |
3437 | 97.5k | PyObject *vitem = vl->ob_item[i]; |
3438 | 97.5k | PyObject *witem = wl->ob_item[i]; |
3439 | 97.5k | if (vitem == witem) { |
3440 | 160 | continue; |
3441 | 160 | } |
3442 | | |
3443 | 97.4k | Py_INCREF(vitem); |
3444 | 97.4k | Py_INCREF(witem); |
3445 | 97.4k | int k = PyObject_RichCompareBool(vitem, witem, Py_EQ); |
3446 | 97.4k | Py_DECREF(vitem); |
3447 | 97.4k | Py_DECREF(witem); |
3448 | 97.4k | if (k < 0) |
3449 | 0 | return NULL; |
3450 | 97.4k | if (!k) |
3451 | 95.6k | break; |
3452 | 97.4k | } |
3453 | | |
3454 | 108k | if (i >= Py_SIZE(vl) || i >= Py_SIZE(wl)) { |
3455 | | /* No more items to compare -- compare sizes */ |
3456 | 13.3k | Py_RETURN_RICHCOMPARE(Py_SIZE(vl), Py_SIZE(wl), op); |
3457 | 13.3k | } |
3458 | | |
3459 | | /* We have an item that differs -- shortcuts for EQ/NE */ |
3460 | 95.6k | if (op == Py_EQ) { |
3461 | 95.6k | Py_RETURN_FALSE; |
3462 | 95.6k | } |
3463 | 24 | if (op == Py_NE) { |
3464 | 24 | Py_RETURN_TRUE; |
3465 | 24 | } |
3466 | | |
3467 | | /* Compare the final item again using the proper operator */ |
3468 | 0 | PyObject *vitem = vl->ob_item[i]; |
3469 | 0 | PyObject *witem = wl->ob_item[i]; |
3470 | 0 | Py_INCREF(vitem); |
3471 | 0 | Py_INCREF(witem); |
3472 | 0 | PyObject *result = PyObject_RichCompare(vl->ob_item[i], wl->ob_item[i], op); |
3473 | 0 | Py_DECREF(vitem); |
3474 | 0 | Py_DECREF(witem); |
3475 | 0 | return result; |
3476 | 24 | } |
3477 | | |
3478 | | static PyObject * |
3479 | | list_richcompare(PyObject *v, PyObject *w, int op) |
3480 | 218k | { |
3481 | 218k | PyObject *ret; |
3482 | 218k | Py_BEGIN_CRITICAL_SECTION2(v, w); |
3483 | 218k | ret = list_richcompare_impl(v, w, op); |
3484 | 218k | Py_END_CRITICAL_SECTION2() |
3485 | 218k | return ret; |
3486 | 218k | } |
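/* Worked example of the comparison above: [1, 2, 3] < [1, 2, 4] finds its
 * first difference at index 2 and returns the result of 3 < 4, while
 * [1, 2] < [1, 2, 3] runs out of items without finding a difference and
 * falls back to comparing the lengths, so the shorter prefix compares as
 * smaller. */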
3487 | | |
3488 | | /*[clinic input] |
3489 | | list.__init__ |
3490 | | |
3491 | | iterable: object(c_default="NULL") = () |
3492 | | / |
3493 | | |
3494 | | Built-in mutable sequence. |
3495 | | |
3496 | | If no argument is given, the constructor creates a new empty list. |
3497 | | The argument must be an iterable if specified. |
3498 | | [clinic start generated code]*/ |
3499 | | |
3500 | | static int |
3501 | | list___init___impl(PyListObject *self, PyObject *iterable) |
3502 | | /*[clinic end generated code: output=0f3c21379d01de48 input=b3f3fe7206af8f6b]*/ |
3503 | 21.6M | { |
3504 | | /* Verify list invariants established by PyType_GenericAlloc() */ |
3505 | 21.6M | assert(0 <= Py_SIZE(self)); |
3506 | 21.6M | assert(Py_SIZE(self) <= self->allocated || self->allocated == -1); |
3507 | 21.6M | assert(self->ob_item != NULL || |
3508 | 21.6M | self->allocated == 0 || self->allocated == -1); |
3509 | | |
3510 | | /* Empty previous contents */ |
3511 | 21.6M | if (self->ob_item != NULL) { |
3512 | 0 | Py_BEGIN_CRITICAL_SECTION(self); |
3513 | 0 | list_clear(self); |
3514 | 0 | Py_END_CRITICAL_SECTION(); |
3515 | 0 | } |
3516 | 21.6M | if (iterable != NULL) { |
3517 | 10.4M | if (_list_extend(self, iterable) < 0) { |
3518 | 0 | return -1; |
3519 | 0 | } |
3520 | 10.4M | } |
3521 | 21.6M | return 0; |
3522 | 21.6M | } |
3523 | | |
3524 | | static PyObject * |
3525 | | list_vectorcall(PyObject *type, PyObject * const*args, |
3526 | | size_t nargsf, PyObject *kwnames) |
3527 | 10.4M | { |
3528 | 10.4M | if (!_PyArg_NoKwnames("list", kwnames)) { |
3529 | 0 | return NULL; |
3530 | 0 | } |
3531 | 10.4M | Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); |
3532 | 10.4M | if (!_PyArg_CheckPositional("list", nargs, 0, 1)) { |
3533 | 0 | return NULL; |
3534 | 0 | } |
3535 | | |
3536 | 10.4M | PyObject *list = PyType_GenericAlloc(_PyType_CAST(type), 0); |
3537 | 10.4M | if (list == NULL) { |
3538 | 0 | return NULL; |
3539 | 0 | } |
3540 | 10.4M | if (nargs) { |
3541 | 10.4M | if (list___init___impl((PyListObject *)list, args[0])) { |
3542 | 0 | Py_DECREF(list); |
3543 | 0 | return NULL; |
3544 | 0 | } |
3545 | 10.4M | } |
3546 | 10.4M | return list; |
3547 | 10.4M | } |
3548 | | |
3549 | | |
3550 | | /*[clinic input] |
3551 | | list.__sizeof__ |
3552 | | |
3553 | | Return the size of the list in memory, in bytes. |
3554 | | [clinic start generated code]*/ |
3555 | | |
3556 | | static PyObject * |
3557 | | list___sizeof___impl(PyListObject *self) |
3558 | | /*[clinic end generated code: output=3417541f95f9a53e input=b8030a5d5ce8a187]*/ |
3559 | 0 | { |
3560 | 0 | size_t res = _PyObject_SIZE(Py_TYPE(self)); |
3561 | 0 | Py_ssize_t allocated = FT_ATOMIC_LOAD_SSIZE_RELAXED(self->allocated); |
3562 | 0 | res += (size_t)allocated * sizeof(void*); |
3563 | 0 | return PyLong_FromSize_t(res); |
3564 | 0 | } |
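/* Worked example (assuming a typical 64-bit, non-free-threaded build where
 * sizeof(PyListObject) is 40 and sizeof(void *) is 8): a list whose
 * allocated capacity is 8 reports 40 + 8 * 8 = 104 bytes here;
 * sys.getsizeof() then adds the GC header on top of this value. */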
3565 | | |
3566 | | static PyObject *list_iter(PyObject *seq); |
3567 | | static PyObject *list_subscript(PyObject*, PyObject*); |
3568 | | |
3569 | | static PyMethodDef list_methods[] = { |
3570 | | {"__getitem__", list_subscript, METH_O|METH_COEXIST, |
3571 | | PyDoc_STR("__getitem__($self, index, /)\n--\n\nReturn self[index].")}, |
3572 | | LIST___REVERSED___METHODDEF |
3573 | | LIST___SIZEOF___METHODDEF |
3574 | | PY_LIST_CLEAR_METHODDEF |
3575 | | LIST_COPY_METHODDEF |
3576 | | LIST_APPEND_METHODDEF |
3577 | | LIST_INSERT_METHODDEF |
3578 | | LIST_EXTEND_METHODDEF |
3579 | | LIST_POP_METHODDEF |
3580 | | LIST_REMOVE_METHODDEF |
3581 | | LIST_INDEX_METHODDEF |
3582 | | LIST_COUNT_METHODDEF |
3583 | | LIST_REVERSE_METHODDEF |
3584 | | LIST_SORT_METHODDEF |
3585 | | {"__class_getitem__", Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")}, |
3586 | | {NULL, NULL} /* sentinel */ |
3587 | | }; |
3588 | | |
3589 | | static PySequenceMethods list_as_sequence = { |
3590 | | list_length, /* sq_length */ |
3591 | | list_concat, /* sq_concat */ |
3592 | | list_repeat, /* sq_repeat */ |
3593 | | list_item, /* sq_item */ |
3594 | | 0, /* sq_slice */ |
3595 | | list_ass_item, /* sq_ass_item */ |
3596 | | 0, /* sq_ass_slice */ |
3597 | | list_contains, /* sq_contains */ |
3598 | | list_inplace_concat, /* sq_inplace_concat */ |
3599 | | list_inplace_repeat, /* sq_inplace_repeat */ |
3600 | | }; |
3601 | | |
3602 | | static inline PyObject * |
3603 | | list_slice_step_lock_held(PyListObject *a, Py_ssize_t start, Py_ssize_t step, Py_ssize_t len) |
3604 | 248 | { |
3605 | 248 | PyListObject *np = (PyListObject *)list_new_prealloc(len); |
3606 | 248 | if (np == NULL) { |
3607 | 0 | return NULL; |
3608 | 0 | } |
3609 | 248 | size_t cur; |
3610 | 248 | Py_ssize_t i; |
3611 | 248 | PyObject **src = a->ob_item; |
3612 | 248 | PyObject **dest = np->ob_item; |
3613 | 1.65k | for (cur = start, i = 0; i < len; |
3614 | 1.40k | cur += (size_t)step, i++) { |
3615 | 1.40k | PyObject *v = src[cur]; |
3616 | 1.40k | dest[i] = Py_NewRef(v); |
3617 | 1.40k | } |
3618 | 248 | Py_SET_SIZE(np, len); |
3619 | 248 | return (PyObject *)np; |
3620 | 248 | } |
3621 | | |
3622 | | static PyObject * |
3623 | | list_slice_wrap(PyListObject *aa, Py_ssize_t start, Py_ssize_t stop, Py_ssize_t step) |
3624 | 4.87M | { |
3625 | 4.87M | PyObject *res = NULL; |
3626 | 4.87M | Py_BEGIN_CRITICAL_SECTION(aa); |
3627 | 4.87M | Py_ssize_t len = PySlice_AdjustIndices(Py_SIZE(aa), &start, &stop, step); |
3628 | 4.87M | if (len <= 0) { |
3629 | 475k | res = PyList_New(0); |
3630 | 475k | } |
3631 | 4.39M | else if (step == 1) { |
3632 | 4.39M | res = list_slice_lock_held(aa, start, stop); |
3633 | 4.39M | } |
3634 | 248 | else { |
3635 | 248 | res = list_slice_step_lock_held(aa, start, step, len); |
3636 | 248 | } |
3637 | 4.87M | Py_END_CRITICAL_SECTION(); |
3638 | 4.87M | return res; |
3639 | 4.87M | } |
3640 | | |
3641 | | static inline PyObject* |
3642 | | list_slice_subscript(PyObject* self, PyObject* item) |
3643 | 4.87M | { |
3644 | 4.87M | assert(PyList_Check(self)); |
3645 | 4.87M | assert(PySlice_Check(item)); |
3646 | 4.87M | Py_ssize_t start, stop, step; |
3647 | 4.87M | if (PySlice_Unpack(item, &start, &stop, &step) < 0) { |
3648 | 0 | return NULL; |
3649 | 0 | } |
3650 | 4.87M | return list_slice_wrap((PyListObject *)self, start, stop, step); |
3651 | 4.87M | } |
3652 | | |
3653 | | PyObject * |
3654 | | _PyList_SliceSubscript(PyObject* _self, PyObject* item) |
3655 | 4.72M | { |
3656 | 4.72M | return list_slice_subscript(_self, item); |
3657 | 4.72M | } |
3658 | | |
3659 | | static PyObject * |
3660 | | list_subscript(PyObject* _self, PyObject* item) |
3661 | 24.0M | { |
3662 | 24.0M | PyListObject* self = (PyListObject*)_self; |
3663 | 24.0M | if (_PyIndex_Check(item)) { |
3664 | 23.8M | Py_ssize_t i; |
3665 | 23.8M | i = PyNumber_AsSsize_t(item, PyExc_IndexError); |
3666 | 23.8M | if (i == -1 && PyErr_Occurred()) |
3667 | 38 | return NULL; |
3668 | 23.8M | if (i < 0) |
3669 | 18.0M | i += PyList_GET_SIZE(self); |
3670 | 23.8M | return list_item((PyObject *)self, i); |
3671 | 23.8M | } |
3672 | 148k | else if (PySlice_Check(item)) { |
3673 | 148k | return list_slice_subscript(_self, item); |
3674 | 148k | } |
3675 | 0 | else { |
3676 | 0 | PyErr_Format(PyExc_TypeError, |
3677 | 0 | "list indices must be integers or slices, not %.200s", |
3678 | 0 | Py_TYPE(item)->tp_name); |
3679 | 0 | return NULL; |
3680 | 0 | } |
3681 | 24.0M | } |
3682 | | |
3683 | | static Py_ssize_t |
3684 | | adjust_slice_indexes(PyListObject *lst, |
3685 | | Py_ssize_t *start, Py_ssize_t *stop, |
3686 | | Py_ssize_t step) |
3687 | 274k | { |
3688 | 274k | Py_ssize_t slicelength = PySlice_AdjustIndices(Py_SIZE(lst), start, stop, |
3689 | 274k | step); |
3690 | | |
3691 | | /* Make sure s[5:2] = [..] inserts at the right place: |
3692 | | before 5, not before 2. */ |
3693 | 274k | if ((step < 0 && *start < *stop) || |
3694 | 274k | (step > 0 && *start > *stop)) |
3695 | 0 | *stop = *start; |
3696 | | |
3697 | 274k | return slicelength; |
3698 | 274k | } |
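/* Sketch (hypothetical helper): with step == 1 the adjusted indexes make
 * `s[5:2] = items` behave like `s[5:5] = items`, i.e. an insertion before
 * index 5.  The C-level slice assignment applies the same clamping for a
 * list with at least five items. */
static int
example_insert_before_five(PyObject *s, PyObject *items)
{
    /* Equivalent of s[5:2] = items: the stop index is pulled up to the
       start, so nothing is deleted and `items` is spliced in before 5. */
    return PyList_SetSlice(s, 5, 2, items);
}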
3699 | | |
3700 | | static int |
3701 | | list_ass_subscript_lock_held(PyObject *_self, PyObject *item, PyObject *value) |
3702 | 2.44M | { |
3703 | 2.44M | _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(_self); |
3704 | | |
3705 | 2.44M | PyListObject *self = (PyListObject *)_self; |
3706 | 2.44M | if (_PyIndex_Check(item)) { |
3707 | 2.16M | Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError); |
3708 | 2.16M | if (i == -1 && PyErr_Occurred()) |
3709 | 0 | return -1; |
3710 | 2.16M | if (i < 0) |
3711 | 2.15M | i += PyList_GET_SIZE(self); |
3712 | 2.16M | return list_ass_item_lock_held(self, i, value); |
3713 | 2.16M | } |
3714 | 274k | else if (PySlice_Check(item)) { |
3715 | 274k | Py_ssize_t start, stop, step; |
3716 | | |
3717 | 274k | if (PySlice_Unpack(item, &start, &stop, &step) < 0) { |
3718 | 0 | return -1; |
3719 | 0 | } |
3720 | | |
3721 | 274k | if (value == NULL) { |
3722 | | /* delete slice */ |
3723 | 24 | PyObject **garbage; |
3724 | 24 | size_t cur; |
3725 | 24 | Py_ssize_t i; |
3726 | 24 | int res; |
3727 | | |
3728 | 24 | Py_ssize_t slicelength = adjust_slice_indexes(self, &start, &stop, |
3729 | 24 | step); |
3730 | | |
3731 | 24 | if (step == 1) |
3732 | 24 | return list_ass_slice_lock_held(self, start, stop, value); |
3733 | | |
3734 | 0 | if (slicelength <= 0) |
3735 | 0 | return 0; |
3736 | | |
3737 | 0 | if (step < 0) { |
3738 | 0 | stop = start + 1; |
3739 | 0 | start = stop + step*(slicelength - 1) - 1; |
3740 | 0 | step = -step; |
3741 | 0 | } |
3742 | |
3743 | 0 | garbage = (PyObject**) |
3744 | 0 | PyMem_Malloc(slicelength*sizeof(PyObject*)); |
3745 | 0 | if (!garbage) { |
3746 | 0 | PyErr_NoMemory(); |
3747 | 0 | return -1; |
3748 | 0 | } |
3749 | | |
3750 | | /* drawing pictures might help understand these for |
3751 | | loops. Basically, we memmove the parts of the |
3752 | | list that are *not* part of the slice: step-1 |
3753 | | items for each item that is part of the slice, |
3754 | |                and then the tail end of the list that was not
3755 | | covered by the slice */ |
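            /* Concrete example: `del a[1::2]` on a six-item list (step 2,
               slicelength 3) removes indexes 1, 3 and 5; the loop moves a[2]
               left by one slot and a[4] left by two, the final tail memmove
               has nothing left to copy, and the result is [a[0], a[2], a[4]]. */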
3756 | 0 | for (cur = start, i = 0; |
3757 | 0 | cur < (size_t)stop; |
3758 | 0 | cur += step, i++) { |
3759 | 0 | Py_ssize_t lim = step - 1; |
3760 | |
3761 | 0 | garbage[i] = PyList_GET_ITEM(self, cur); |
3762 | |
3763 | 0 | if (cur + step >= (size_t)Py_SIZE(self)) { |
3764 | 0 | lim = Py_SIZE(self) - cur - 1; |
3765 | 0 | } |
3766 | |
3767 | 0 | memmove(self->ob_item + cur - i, |
3768 | 0 | self->ob_item + cur + 1, |
3769 | 0 | lim * sizeof(PyObject *)); |
3770 | 0 | } |
3771 | 0 | cur = start + (size_t)slicelength * step; |
3772 | 0 | if (cur < (size_t)Py_SIZE(self)) { |
3773 | 0 | memmove(self->ob_item + cur - slicelength, |
3774 | 0 | self->ob_item + cur, |
3775 | 0 | (Py_SIZE(self) - cur) * |
3776 | 0 | sizeof(PyObject *)); |
3777 | 0 | } |
3778 | |
3779 | 0 | Py_SET_SIZE(self, Py_SIZE(self) - slicelength); |
3780 | 0 | res = list_resize(self, Py_SIZE(self)); |
3781 | |
3782 | 0 | for (i = 0; i < slicelength; i++) { |
3783 | 0 | Py_DECREF(garbage[i]); |
3784 | 0 | } |
3785 | 0 | PyMem_Free(garbage); |
3786 | |
3787 | 0 | return res; |
3788 | 0 | } |
3789 | 274k | else { |
3790 | | /* assign slice */ |
3791 | 274k | PyObject *ins, *seq; |
3792 | 274k | PyObject **garbage, **seqitems, **selfitems; |
3793 | 274k | Py_ssize_t i; |
3794 | 274k | size_t cur; |
3795 | | |
3796 | | /* protect against a[::-1] = a */ |
3797 | 274k | if (self == (PyListObject*)value) { |
3798 | 0 | seq = list_slice_lock_held((PyListObject *)value, 0, |
3799 | 0 | Py_SIZE(value)); |
3800 | 0 | } |
3801 | 274k | else { |
3802 | 274k | seq = PySequence_Fast(value, |
3803 | 274k | "must assign iterable " |
3804 | 274k | "to extended slice"); |
3805 | 274k | } |
3806 | 274k | if (!seq) |
3807 | 0 | return -1; |
3808 | | |
3809 | 274k | Py_ssize_t slicelength = adjust_slice_indexes(self, &start, &stop, |
3810 | 274k | step); |
3811 | | |
3812 | 274k | if (step == 1) { |
3813 | 274k | int res = list_ass_slice_lock_held(self, start, stop, seq); |
3814 | 274k | Py_DECREF(seq); |
3815 | 274k | return res; |
3816 | 274k | } |
3817 | | |
3818 | 0 | if (PySequence_Fast_GET_SIZE(seq) != slicelength) { |
3819 | 0 | PyErr_Format(PyExc_ValueError, |
3820 | 0 | "attempt to assign sequence of " |
3821 | 0 | "size %zd to extended slice of " |
3822 | 0 | "size %zd", |
3823 | 0 | PySequence_Fast_GET_SIZE(seq), |
3824 | 0 | slicelength); |
3825 | 0 | Py_DECREF(seq); |
3826 | 0 | return -1; |
3827 | 0 | } |
3828 | | |
3829 | 0 | if (!slicelength) { |
3830 | 0 | Py_DECREF(seq); |
3831 | 0 | return 0; |
3832 | 0 | } |
3833 | | |
3834 | 0 | garbage = (PyObject**) |
3835 | 0 | PyMem_Malloc(slicelength*sizeof(PyObject*)); |
3836 | 0 | if (!garbage) { |
3837 | 0 | Py_DECREF(seq); |
3838 | 0 | PyErr_NoMemory(); |
3839 | 0 | return -1; |
3840 | 0 | } |
3841 | | |
3842 | 0 | selfitems = self->ob_item; |
3843 | 0 | seqitems = PySequence_Fast_ITEMS(seq); |
3844 | 0 | for (cur = start, i = 0; i < slicelength; |
3845 | 0 | cur += (size_t)step, i++) { |
3846 | 0 | garbage[i] = selfitems[cur]; |
3847 | 0 | ins = Py_NewRef(seqitems[i]); |
3848 | 0 | FT_ATOMIC_STORE_PTR_RELEASE(selfitems[cur], ins); |
3849 | 0 | } |
3850 | |
3851 | 0 | for (i = 0; i < slicelength; i++) { |
3852 | 0 | Py_DECREF(garbage[i]); |
3853 | 0 | } |
3854 | |
3855 | 0 | PyMem_Free(garbage); |
3856 | 0 | Py_DECREF(seq); |
3857 | |
3858 | 0 | return 0; |
3859 | 0 | } |
3860 | 274k | } |
3861 | 0 | else { |
3862 | 0 | PyErr_Format(PyExc_TypeError, |
3863 | 0 | "list indices must be integers or slices, not %.200s", |
3864 | 0 | Py_TYPE(item)->tp_name); |
3865 | 0 | return -1; |
3866 | 0 | } |
3867 | 2.44M | } |
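/* Worked example of the extended-slice branch above: for a ten-element list,
 * `a[::2] = seq` addresses five slots, so `seq` must yield exactly five
 * items or the ValueError formatted above is raised; step-1 slices instead
 * go through list_ass_slice_lock_held() and may grow or shrink the list. */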
3868 | | |
3869 | | static int |
3870 | | list_ass_subscript(PyObject *self, PyObject *item, PyObject *value) |
3871 | 2.44M | { |
3872 | 2.44M | int res; |
3873 | | #ifdef Py_GIL_DISABLED |
3874 | | if (PySlice_Check(item) && value != NULL && PyList_CheckExact(value)) { |
3875 | | Py_BEGIN_CRITICAL_SECTION2(self, value); |
3876 | | res = list_ass_subscript_lock_held(self, item, value); |
3877 | | Py_END_CRITICAL_SECTION2(); |
3878 | | return res; |
3879 | | } |
3880 | | #endif |
3881 | 2.44M | Py_BEGIN_CRITICAL_SECTION(self); |
3882 | 2.44M | res = list_ass_subscript_lock_held(self, item, value); |
3883 | 2.44M | Py_END_CRITICAL_SECTION(); |
3884 | 2.44M | return res; |
3885 | 2.44M | } |
3886 | | |
3887 | | static PyMappingMethods list_as_mapping = { |
3888 | | list_length, |
3889 | | list_subscript, |
3890 | | list_ass_subscript |
3891 | | }; |
3892 | | |
3893 | | PyTypeObject PyList_Type = { |
3894 | | PyVarObject_HEAD_INIT(&PyType_Type, 0) |
3895 | | "list", |
3896 | | sizeof(PyListObject), |
3897 | | 0, |
3898 | | list_dealloc, /* tp_dealloc */ |
3899 | | 0, /* tp_vectorcall_offset */ |
3900 | | 0, /* tp_getattr */ |
3901 | | 0, /* tp_setattr */ |
3902 | | 0, /* tp_as_async */ |
3903 | | list_repr, /* tp_repr */ |
3904 | | 0, /* tp_as_number */ |
3905 | | &list_as_sequence, /* tp_as_sequence */ |
3906 | | &list_as_mapping, /* tp_as_mapping */ |
3907 | | PyObject_HashNotImplemented, /* tp_hash */ |
3908 | | 0, /* tp_call */ |
3909 | | 0, /* tp_str */ |
3910 | | PyObject_GenericGetAttr, /* tp_getattro */ |
3911 | | 0, /* tp_setattro */ |
3912 | | 0, /* tp_as_buffer */ |
3913 | | Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | |
3914 | | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_LIST_SUBCLASS | |
3915 | | _Py_TPFLAGS_MATCH_SELF | Py_TPFLAGS_SEQUENCE, /* tp_flags */ |
3916 | | list___init____doc__, /* tp_doc */ |
3917 | | list_traverse, /* tp_traverse */ |
3918 | | list_clear_slot, /* tp_clear */ |
3919 | | list_richcompare, /* tp_richcompare */ |
3920 | | 0, /* tp_weaklistoffset */ |
3921 | | list_iter, /* tp_iter */ |
3922 | | 0, /* tp_iternext */ |
3923 | | list_methods, /* tp_methods */ |
3924 | | 0, /* tp_members */ |
3925 | | 0, /* tp_getset */ |
3926 | | 0, /* tp_base */ |
3927 | | 0, /* tp_dict */ |
3928 | | 0, /* tp_descr_get */ |
3929 | | 0, /* tp_descr_set */ |
3930 | | 0, /* tp_dictoffset */ |
3931 | | list___init__, /* tp_init */ |
3932 | | PyType_GenericAlloc, /* tp_alloc */ |
3933 | | PyType_GenericNew, /* tp_new */ |
3934 | | PyObject_GC_Del, /* tp_free */ |
3935 | | .tp_vectorcall = list_vectorcall, |
3936 | | .tp_version_tag = _Py_TYPE_VERSION_LIST, |
3937 | | }; |
3938 | | |
3939 | | /*********************** List Iterator **************************/ |
3940 | | |
3941 | | static void listiter_dealloc(PyObject *); |
3942 | | static int listiter_traverse(PyObject *, visitproc, void *); |
3943 | | static PyObject *listiter_next(PyObject *); |
3944 | | static PyObject *listiter_len(PyObject *, PyObject *); |
3945 | | static PyObject *listiter_reduce_general(void *_it, int forward); |
3946 | | static PyObject *listiter_reduce(PyObject *, PyObject *); |
3947 | | static PyObject *listiter_setstate(PyObject *, PyObject *state); |
3948 | | |
3949 | | PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); |
3950 | | PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); |
3951 | | PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); |
3952 | | |
3953 | | static PyMethodDef listiter_methods[] = { |
3954 | | {"__length_hint__", listiter_len, METH_NOARGS, length_hint_doc}, |
3955 | | {"__reduce__", listiter_reduce, METH_NOARGS, reduce_doc}, |
3956 | | {"__setstate__", listiter_setstate, METH_O, setstate_doc}, |
3957 | | {NULL, NULL} /* sentinel */ |
3958 | | }; |
3959 | | |
3960 | | PyTypeObject PyListIter_Type = { |
3961 | | PyVarObject_HEAD_INIT(&PyType_Type, 0) |
3962 | | "list_iterator", /* tp_name */ |
3963 | | sizeof(_PyListIterObject), /* tp_basicsize */ |
3964 | | 0, /* tp_itemsize */ |
3965 | | /* methods */ |
3966 | | listiter_dealloc, /* tp_dealloc */ |
3967 | | 0, /* tp_vectorcall_offset */ |
3968 | | 0, /* tp_getattr */ |
3969 | | 0, /* tp_setattr */ |
3970 | | 0, /* tp_as_async */ |
3971 | | 0, /* tp_repr */ |
3972 | | 0, /* tp_as_number */ |
3973 | | 0, /* tp_as_sequence */ |
3974 | | 0, /* tp_as_mapping */ |
3975 | | 0, /* tp_hash */ |
3976 | | 0, /* tp_call */ |
3977 | | 0, /* tp_str */ |
3978 | | PyObject_GenericGetAttr, /* tp_getattro */ |
3979 | | 0, /* tp_setattro */ |
3980 | | 0, /* tp_as_buffer */ |
3981 | | Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */ |
3982 | | 0, /* tp_doc */ |
3983 | | listiter_traverse, /* tp_traverse */ |
3984 | | 0, /* tp_clear */ |
3985 | | 0, /* tp_richcompare */ |
3986 | | 0, /* tp_weaklistoffset */ |
3987 | | PyObject_SelfIter, /* tp_iter */ |
3988 | | listiter_next, /* tp_iternext */ |
3989 | | listiter_methods, /* tp_methods */ |
3990 | | 0, /* tp_members */ |
3991 | | }; |
3992 | | |
3993 | | |
3994 | | static PyObject * |
3995 | | list_iter(PyObject *seq) |
3996 | 31.6M | { |
3997 | 31.6M | if (!PyList_Check(seq)) { |
3998 | 0 | PyErr_BadInternalCall(); |
3999 | 0 | return NULL; |
4000 | 0 | } |
4001 | 31.6M | _PyListIterObject *it = _Py_FREELIST_POP(_PyListIterObject, list_iters); |
4002 | 31.6M | if (it == NULL) { |
4003 | 2.09M | it = PyObject_GC_New(_PyListIterObject, &PyListIter_Type); |
4004 | 2.09M | if (it == NULL) { |
4005 | 0 | return NULL; |
4006 | 0 | } |
4007 | 2.09M | } |
4008 | 31.6M | it->it_index = 0; |
4009 | 31.6M | it->it_seq = (PyListObject *)Py_NewRef(seq); |
4010 | 31.6M | _PyObject_GC_TRACK(it); |
4011 | 31.6M | return (PyObject *)it; |
4012 | 31.6M | } |
4013 | | |
4014 | | static void |
4015 | | listiter_dealloc(PyObject *self) |
4016 | 31.6M | { |
4017 | 31.6M | _PyListIterObject *it = (_PyListIterObject *)self; |
4018 | 31.6M | _PyObject_GC_UNTRACK(it); |
4019 | 31.6M | Py_XDECREF(it->it_seq); |
4020 | 31.6M | assert(Py_IS_TYPE(self, &PyListIter_Type)); |
4021 | 31.6M | _Py_FREELIST_FREE(list_iters, it, PyObject_GC_Del); |
4022 | 31.6M | } |
4023 | | |
4024 | | static int |
4025 | | listiter_traverse(PyObject *it, visitproc visit, void *arg) |
4026 | 354k | { |
4027 | 354k | Py_VISIT(((_PyListIterObject *)it)->it_seq); |
4028 | 354k | return 0; |
4029 | 354k | } |
4030 | | |
4031 | | static PyObject * |
4032 | | listiter_next(PyObject *self) |
4033 | 155M | { |
4034 | 155M | _PyListIterObject *it = (_PyListIterObject *)self; |
4035 | 155M | Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index); |
4036 | 155M | if (index < 0) { |
4037 | 160 | return NULL; |
4038 | 160 | } |
4039 | | |
4040 | 155M | PyObject *item = list_get_item_ref(it->it_seq, index); |
4041 | 155M | if (item == NULL) { |
4042 | | // out-of-bounds |
4043 | 31.0M | FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, -1); |
4044 | 31.0M | #ifndef Py_GIL_DISABLED |
4045 | 31.0M | PyListObject *seq = it->it_seq; |
4046 | 31.0M | it->it_seq = NULL; |
4047 | 31.0M | Py_DECREF(seq); |
4048 | 31.0M | #endif |
4049 | 31.0M | return NULL; |
4050 | 31.0M | } |
4051 | 123M | FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index + 1); |
4052 | 123M | return item; |
4053 | 155M | } |
4054 | | |
4055 | | static PyObject * |
4056 | | listiter_len(PyObject *self, PyObject *Py_UNUSED(ignored)) |
4057 | 1.70M | { |
4058 | 1.70M | assert(self != NULL); |
4059 | 1.70M | _PyListIterObject *it = (_PyListIterObject *)self; |
4060 | 1.70M | Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index); |
4061 | 1.70M | if (index >= 0) { |
4062 | 1.70M | Py_ssize_t len = PyList_GET_SIZE(it->it_seq) - index; |
4063 | 1.70M | if (len >= 0) |
4064 | 1.70M | return PyLong_FromSsize_t(len); |
4065 | 1.70M | } |
4066 | 0 | return PyLong_FromLong(0); |
4067 | 1.70M | } |
4068 | | |
4069 | | static PyObject * |
4070 | | listiter_reduce(PyObject *it, PyObject *Py_UNUSED(ignored)) |
4071 | 0 | { |
4072 | 0 | return listiter_reduce_general(it, 1); |
4073 | 0 | } |
4074 | | |
4075 | | static PyObject * |
4076 | | listiter_setstate(PyObject *self, PyObject *state) |
4077 | 0 | { |
4078 | 0 | _PyListIterObject *it = (_PyListIterObject *)self; |
4079 | 0 | Py_ssize_t index = PyLong_AsSsize_t(state); |
4080 | 0 | if (index == -1 && PyErr_Occurred()) |
4081 | 0 | return NULL; |
4082 | 0 | if (it->it_seq != NULL) { |
4083 | 0 | if (index < -1) |
4084 | 0 | index = -1; |
4085 | 0 | else if (index > PyList_GET_SIZE(it->it_seq)) |
4086 | 0 | index = PyList_GET_SIZE(it->it_seq); /* iterator exhausted */ |
4087 | 0 | FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index); |
4088 | 0 | } |
4089 | 0 | Py_RETURN_NONE; |
4090 | 0 | } |
4091 | | |
4092 | | /*********************** List Reverse Iterator **************************/ |
4093 | | |
4094 | | typedef struct { |
4095 | | PyObject_HEAD |
4096 | | Py_ssize_t it_index; |
4097 | | PyListObject *it_seq; /* Set to NULL when iterator is exhausted */ |
4098 | | } listreviterobject; |
4099 | | |
4100 | | static void listreviter_dealloc(PyObject *); |
4101 | | static int listreviter_traverse(PyObject *, visitproc, void *); |
4102 | | static PyObject *listreviter_next(PyObject *); |
4103 | | static PyObject *listreviter_len(PyObject *, PyObject *); |
4104 | | static PyObject *listreviter_reduce(PyObject *, PyObject *); |
4105 | | static PyObject *listreviter_setstate(PyObject *, PyObject *); |
4106 | | |
4107 | | static PyMethodDef listreviter_methods[] = { |
4108 | | {"__length_hint__", listreviter_len, METH_NOARGS, length_hint_doc}, |
4109 | | {"__reduce__", listreviter_reduce, METH_NOARGS, reduce_doc}, |
4110 | | {"__setstate__", listreviter_setstate, METH_O, setstate_doc}, |
4111 | | {NULL, NULL} /* sentinel */ |
4112 | | }; |
4113 | | |
4114 | | PyTypeObject PyListRevIter_Type = { |
4115 | | PyVarObject_HEAD_INIT(&PyType_Type, 0) |
4116 | | "list_reverseiterator", /* tp_name */ |
4117 | | sizeof(listreviterobject), /* tp_basicsize */ |
4118 | | 0, /* tp_itemsize */ |
4119 | | /* methods */ |
4120 | | listreviter_dealloc, /* tp_dealloc */ |
4121 | | 0, /* tp_vectorcall_offset */ |
4122 | | 0, /* tp_getattr */ |
4123 | | 0, /* tp_setattr */ |
4124 | | 0, /* tp_as_async */ |
4125 | | 0, /* tp_repr */ |
4126 | | 0, /* tp_as_number */ |
4127 | | 0, /* tp_as_sequence */ |
4128 | | 0, /* tp_as_mapping */ |
4129 | | 0, /* tp_hash */ |
4130 | | 0, /* tp_call */ |
4131 | | 0, /* tp_str */ |
4132 | | PyObject_GenericGetAttr, /* tp_getattro */ |
4133 | | 0, /* tp_setattro */ |
4134 | | 0, /* tp_as_buffer */ |
4135 | | Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */ |
4136 | | 0, /* tp_doc */ |
4137 | | listreviter_traverse, /* tp_traverse */ |
4138 | | 0, /* tp_clear */ |
4139 | | 0, /* tp_richcompare */ |
4140 | | 0, /* tp_weaklistoffset */ |
4141 | | PyObject_SelfIter, /* tp_iter */ |
4142 | | listreviter_next, /* tp_iternext */ |
4143 | | listreviter_methods, /* tp_methods */ |
4144 | | 0, |
4145 | | }; |
4146 | | |
4147 | | /*[clinic input] |
4148 | | list.__reversed__ |
4149 | | |
4150 | | Return a reverse iterator over the list. |
4151 | | [clinic start generated code]*/ |
4152 | | |
4153 | | static PyObject * |
4154 | | list___reversed___impl(PyListObject *self) |
4155 | | /*[clinic end generated code: output=b166f073208c888c input=eadb6e17f8a6a280]*/ |
4156 | 49.1M | { |
4157 | 49.1M | listreviterobject *it; |
4158 | | |
4159 | 49.1M | it = PyObject_GC_New(listreviterobject, &PyListRevIter_Type); |
4160 | 49.1M | if (it == NULL) |
4161 | 0 | return NULL; |
4162 | 49.1M | assert(PyList_Check(self)); |
4163 | 49.1M | it->it_index = PyList_GET_SIZE(self) - 1; |
4164 | 49.1M | it->it_seq = (PyListObject*)Py_NewRef(self); |
4165 | 49.1M | PyObject_GC_Track(it); |
4166 | 49.1M | return (PyObject *)it; |
4167 | 49.1M | } |
4168 | | |
4169 | | static void |
4170 | | listreviter_dealloc(PyObject *self) |
4171 | 49.1M | { |
4172 | 49.1M | listreviterobject *it = (listreviterobject *)self; |
4173 | 49.1M | PyObject_GC_UnTrack(it); |
4174 | 49.1M | Py_XDECREF(it->it_seq); |
4175 | 49.1M | PyObject_GC_Del(it); |
4176 | 49.1M | } |
4177 | | |
4178 | | static int |
4179 | | listreviter_traverse(PyObject *it, visitproc visit, void *arg) |
4180 | 833 | { |
4181 | 833 | Py_VISIT(((listreviterobject *)it)->it_seq); |
4182 | 833 | return 0; |
4183 | 833 | } |
4184 | | |
4185 | | static PyObject * |
4186 | | listreviter_next(PyObject *self) |
4187 | 59.3M | { |
4188 | 59.3M | listreviterobject *it = (listreviterobject *)self; |
4189 | 59.3M | assert(it != NULL); |
4190 | 59.3M | Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index); |
4191 | 59.3M | if (index < 0) { |
4192 | 32.2M | return NULL; |
4193 | 32.2M | } |
4194 | | |
4195 | 27.1M | PyListObject *seq = it->it_seq; |
4196 | 27.1M | assert(PyList_Check(seq)); |
4197 | 27.1M | PyObject *item = list_get_item_ref(seq, index); |
4198 | 27.1M | if (item != NULL) { |
4199 | 27.1M | FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index - 1); |
4200 | 27.1M | return item; |
4201 | 27.1M | } |
4202 | 0 | FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, -1); |
4203 | 0 | #ifndef Py_GIL_DISABLED |
4204 | 0 | it->it_seq = NULL; |
4205 | 0 | Py_DECREF(seq); |
4206 | 0 | #endif |
4207 | 0 | return NULL; |
4208 | 27.1M | } |
4209 | | |
4210 | | static PyObject * |
4211 | | listreviter_len(PyObject *self, PyObject *Py_UNUSED(ignored)) |
4212 | 0 | { |
4213 | 0 | listreviterobject *it = (listreviterobject *)self; |
4214 | 0 | Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index); |
4215 | 0 | Py_ssize_t len = index + 1; |
4216 | 0 | if (it->it_seq == NULL || PyList_GET_SIZE(it->it_seq) < len) |
4217 | 0 | len = 0; |
4218 | 0 | return PyLong_FromSsize_t(len); |
4219 | 0 | } |
4220 | | |
4221 | | static PyObject * |
4222 | | listreviter_reduce(PyObject *it, PyObject *Py_UNUSED(ignored)) |
4223 | 0 | { |
4224 | 0 | return listiter_reduce_general(it, 0); |
4225 | 0 | } |
4226 | | |
4227 | | static PyObject * |
4228 | | listreviter_setstate(PyObject *self, PyObject *state) |
4229 | 0 | { |
4230 | 0 | listreviterobject *it = (listreviterobject *)self; |
4231 | 0 | Py_ssize_t index = PyLong_AsSsize_t(state); |
4232 | 0 | if (index == -1 && PyErr_Occurred()) |
4233 | 0 | return NULL; |
4234 | 0 | if (it->it_seq != NULL) { |
4235 | 0 | if (index < -1) |
4236 | 0 | index = -1; |
4237 | 0 | else if (index > PyList_GET_SIZE(it->it_seq) - 1) |
4238 | 0 | index = PyList_GET_SIZE(it->it_seq) - 1; |
4239 | 0 | FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index); |
4240 | 0 | } |
4241 | 0 | Py_RETURN_NONE; |
4242 | 0 | } |
4243 | | |
4244 | | /* common pickling support */ |
4245 | | |
4246 | | static PyObject * |
4247 | | listiter_reduce_general(void *_it, int forward) |
4248 | 0 | { |
4249 | 0 | PyObject *list; |
4250 | 0 | PyObject *iter; |
4251 | | |
4252 | | /* _PyEval_GetBuiltin can invoke arbitrary code, |
4253 | |      * so the call must happen before the iterator's pointers are accessed.
4254 | | * see issue #101765 */ |
4255 | |
4256 | 0 | if (forward) { |
4257 | 0 | iter = _PyEval_GetBuiltin(&_Py_ID(iter)); |
4258 | 0 | _PyListIterObject *it = (_PyListIterObject *)_it; |
4259 | 0 | Py_ssize_t idx = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index); |
4260 | 0 | if (idx >= 0) { |
4261 | 0 | return Py_BuildValue("N(O)n", iter, it->it_seq, idx); |
4262 | 0 | } |
4263 | 0 | } else { |
4264 | 0 | iter = _PyEval_GetBuiltin(&_Py_ID(reversed)); |
4265 | 0 | listreviterobject *it = (listreviterobject *)_it; |
4266 | 0 | Py_ssize_t idx = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index); |
4267 | 0 | if (idx >= 0) { |
4268 | 0 | return Py_BuildValue("N(O)n", iter, it->it_seq, idx); |
4269 | 0 | } |
4270 | 0 | } |
4271 | | /* empty iterator, create an empty list */ |
4272 | 0 | list = PyList_New(0); |
4273 | 0 | if (list == NULL) |
4274 | 0 | return NULL; |
4275 | 0 | return Py_BuildValue("N(N)", iter, list); |
4276 | 0 | } |