1use super::{
2 PositionIterInternal, PyGenericAlias, PyStrRef, PyType, PyTypeRef, iter::builtins_iter,
3};
4use crate::common::lock::LazyLock;
5use crate::common::{
6 hash::{PyHash, PyUHash},
7 lock::PyMutex,
8 wtf8::wtf8_concat,
9};
10use crate::object::{Traverse, TraverseFn};
11use crate::{
12 AsObject, Context, Py, PyObject, PyObjectRef, PyPayload, PyRef, PyResult, TryFromObject,
13 atomic_func,
14 class::PyClassImpl,
15 convert::{ToPyObject, TransmuteFromObject},
16 function::{ArgSize, FuncArgs, OptionalArg, PyArithmeticValue, PyComparisonValue},
17 iter::PyExactSizeIterator,
18 protocol::{PyIterReturn, PyMappingMethods, PyNumberMethods, PySequenceMethods},
19 recursion::ReprGuard,
20 sequence::{OptionalRangeArgs, SequenceExt},
21 sliceable::{SequenceIndex, SliceableSequenceOp},
22 types::{
23 AsMapping, AsNumber, AsSequence, Comparable, Constructor, Hashable, IterNext, Iterable,
24 PyComparisonOp, Representable, SelfIter,
25 },
26 utils::collection_repr,
27 vm::VirtualMachine,
28};
29use alloc::fmt;
30use core::cell::Cell;
31use core::ptr::NonNull;
32
/// The Python `tuple` object: an immutable, fixed-length sequence of
/// object references. Traversal for GC is implemented manually (see the
/// `Traverse` impl below), hence `traverse = "manual"`.
#[pyclass(module = false, name = "tuple", traverse = "manual")]
pub struct PyTuple<R = PyObjectRef> {
    // Boxed slice rather than Vec: length is fixed after construction and
    // we drop the capacity word. `R` is normally `PyObjectRef`, but typed
    // tuples (`PyTuple<PyRef<T>>`) reuse the same layout via transmute.
    elements: Box<[R]>,
}
37
38impl<R> fmt::Debug for PyTuple<R> {
39 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
40 f.write_str("tuple")
42 }
43}
44
/// Manual GC integration (matches `traverse = "manual"` on the pyclass).
unsafe impl Traverse for PyTuple {
    /// Visit every element reference for cycle detection.
    fn traverse(&self, traverse_fn: &mut TraverseFn<'_>) {
        self.elements.traverse(traverse_fn);
    }

    /// Detach all element references so the collector can drop them;
    /// leaves `self.elements` as an empty boxed slice via `mem::take`.
    fn clear(&mut self, out: &mut Vec<PyObjectRef>) {
        let elements = core::mem::take(&mut self.elements);
        out.extend(elements.into_vec());
    }
}
57
/// Per-thread cache of dead tuple allocations, bucketed by element count
/// (bucket `i` holds allocations for tuples of length `i + 1`). Pointers
/// stored here are raw `PyInner<PyTuple>` allocations awaiting reuse.
struct TupleFreeList {
    buckets: [Vec<NonNull<PyObject>>; Self::MAX_SAVE_SIZE],
}
64
impl TupleFreeList {
    // Only tuples with 1..=20 elements are cached; longer (and empty)
    // tuples bypass the free list entirely.
    const MAX_SAVE_SIZE: usize = 20;
    const fn new() -> Self {
        Self {
            // `const { Vec::new() }` repeats a const-evaluated empty Vec,
            // which is allowed even though Vec is not Copy.
            buckets: [const { Vec::new() }; Self::MAX_SAVE_SIZE],
        }
    }
}
74
75impl Default for TupleFreeList {
76 fn default() -> Self {
77 Self::new()
78 }
79}
80
impl Drop for TupleFreeList {
    /// Release every cached allocation when the thread-local dies.
    fn drop(&mut self) {
        // All cached pointers were allocated with the PyInner<PyTuple>
        // layout; that layout is length-independent because the elements
        // live in a separately boxed slice, so one layout fits every bucket.
        let layout = crate::object::pyinner_layout::<PyTuple>();
        for bucket in &mut self.buckets {
            for ptr in bucket.drain(..) {
                unsafe {
                    // SAFETY: ptr came from freelist_push, which only stores
                    // allocations of this layout; each is freed exactly once
                    // since drain removes it from the bucket.
                    alloc::alloc::dealloc(ptr.as_ptr() as *mut u8, layout);
                }
            }
        }
    }
}
95
thread_local! {
    // Cell (not RefCell): accessors take() the whole list, mutate, and
    // set() it back, avoiding runtime borrow tracking on the hot path.
    static TUPLE_FREELIST: Cell<TupleFreeList> = const { Cell::new(TupleFreeList::new()) };
}
99
impl PyPayload for PyTuple {
    // At most 2000 cached allocations per length bucket.
    const MAX_FREELIST: usize = 2000;
    const HAS_FREELIST: bool = true;

    #[inline]
    fn class(ctx: &Context) -> &'static Py<PyType> {
        ctx.types.tuple_type
    }

    /// Try to cache a dead tuple's allocation for reuse. Returns `true`
    /// if the pointer was stored (caller must then NOT deallocate it).
    #[inline]
    unsafe fn freelist_push(obj: *mut PyObject) -> bool {
        // SAFETY: caller guarantees `obj` is a live PyInner<PyTuple>
        // allocation; we only read the element count here.
        let len = unsafe { &*(obj as *const crate::Py<PyTuple>) }
            .elements
            .len();
        // Empty tuples are interned (ctx.empty_tuple) and oversized ones
        // are not worth caching.
        if len == 0 || len > TupleFreeList::MAX_SAVE_SIZE {
            return false;
        }
        // try_with: during thread teardown the TLS slot may be gone;
        // report "not stored" so the caller frees normally.
        TUPLE_FREELIST
            .try_with(|fl| {
                let mut list = fl.take();
                let bucket = &mut list.buckets[len - 1];
                let stored = if bucket.len() < Self::MAX_FREELIST {
                    bucket.push(unsafe { NonNull::new_unchecked(obj) });
                    true
                } else {
                    false
                };
                fl.set(list);
                stored
            })
            .unwrap_or(false)
    }

    /// Fetch a cached allocation matching the new payload's length, if any.
    #[inline]
    unsafe fn freelist_pop(payload: &Self) -> Option<NonNull<PyObject>> {
        let len = payload.elements.len();
        if len == 0 || len > TupleFreeList::MAX_SAVE_SIZE {
            return None;
        }
        TUPLE_FREELIST
            .try_with(|fl| {
                let mut list = fl.take();
                let result = list.buckets[len - 1].pop();
                fl.set(list);
                result
            })
            .ok()
            .flatten()
    }
}
150
/// Conversion of a Rust value into a Python tuple object.
pub trait IntoPyTuple {
    fn into_pytuple(self, vm: &VirtualMachine) -> PyTupleRef;
}
154
impl IntoPyTuple for () {
    /// The unit value maps to the interned empty tuple — no allocation.
    fn into_pytuple(self, vm: &VirtualMachine) -> PyTupleRef {
        vm.ctx.empty_tuple.clone()
    }
}
160
impl IntoPyTuple for Vec<PyObjectRef> {
    /// Wrap an owned element vector; `new_ref` interns the empty case.
    fn into_pytuple(self, vm: &VirtualMachine) -> PyTupleRef {
        PyTuple::new_ref(self, &vm.ctx)
    }
}
166
/// Fallible extraction of a Rust value from a borrowed Python tuple
/// (the inverse direction of [`IntoPyTuple`]).
pub trait FromPyTuple<'a>: Sized {
    fn from_pytuple(tuple: &'a PyTuple, vm: &VirtualMachine) -> PyResult<Self>;
}
170
/// Generates `IntoPyTuple`, `FromPyTuple`, and `ToPyObject` impls for a
/// Rust tuple of a given arity (one type parameter per element).
macro_rules! impl_from_into_pytuple {
    ($($T:ident),+) => {
        impl<$($T: ToPyObject),*> IntoPyTuple for ($($T,)*) {
            fn into_pytuple(self, vm: &VirtualMachine) -> PyTupleRef {
                // Reuse the type idents as binding names for destructuring.
                #[allow(non_snake_case)]
                let ($($T,)*) = self;
                PyTuple::new_ref(vec![$($T.to_pyobject(vm)),*], &vm.ctx)
            }
        }

        impl<'a, $($T: TryFromObject),*> FromPyTuple<'a> for ($($T,)*) {
            fn from_pytuple(tuple: &'a PyTuple, vm: &VirtualMachine) -> PyResult<Self> {
                #[allow(non_snake_case)]
                // Fixed-size slice pattern: fails (TypeError) unless the
                // tuple has exactly the expected arity.
                let &[$(ref $T),+] = tuple.as_slice().try_into().map_err(|_| {
                    vm.new_type_error(format!("expected tuple with {} elements", impl_from_into_pytuple!(@count $($T)+)))
                })?;
                Ok(($($T::try_from_object(vm, $T.clone())?,)+))

            }
        }

        impl<$($T: ToPyObject),*> ToPyObject for ($($T,)*) {
            fn to_pyobject(self, vm: &VirtualMachine) -> PyObjectRef {
                self.into_pytuple(vm).into()
            }
        }
    };
    // @count: expands to `0 + 1 + 1 + ...`, one `1` per type parameter,
    // yielding the arity as a const expression for the error message.
    (@count $($T:ident)+) => {
        0 $(+ impl_from_into_pytuple!(@discard $T))+
    };
    (@discard $T:ident) => {
        1
    };
}
207
// Tuple conversions for arities 1 through 7; the zero-arity case `()`
// has a dedicated impl above that reuses the interned empty tuple.
impl_from_into_pytuple!(A);
impl_from_into_pytuple!(A, B);
impl_from_into_pytuple!(A, B, C);
impl_from_into_pytuple!(A, B, C, D);
impl_from_into_pytuple!(A, B, C, D, E);
impl_from_into_pytuple!(A, B, C, D, E, F);
impl_from_into_pytuple!(A, B, C, D, E, F, G);

/// Reference-counted handle to a `tuple` object.
pub type PyTupleRef = PyRef<PyTuple>;
217
impl Constructor for PyTuple {
    type Args = Vec<PyObjectRef>;

    /// `tuple.__new__`: takes an optional iterable.
    ///
    /// Fast paths apply only when constructing the exact `tuple` type
    /// (not subclasses): an exact-tuple argument is returned as-is, and
    /// a missing/empty argument yields the interned empty tuple.
    fn slot_new(cls: PyTypeRef, args: FuncArgs, vm: &VirtualMachine) -> PyResult {
        let iterable: OptionalArg<PyObjectRef> = args.bind(vm)?;

        if cls.is(vm.ctx.types.tuple_type) {
            // tuple(t) where t is exactly a tuple: share the object.
            if let OptionalArg::Present(ref input) = iterable
                && let Ok(tuple) = input.clone().downcast_exact::<PyTuple>(vm)
            {
                return Ok(tuple.into_pyref().into());
            }

            // tuple() with no argument: interned empty tuple.
            if iterable.is_missing() {
                return Ok(vm.ctx.empty_tuple.clone().into());
            }
        }

        // General path: materialize the iterable (may run Python code).
        let elements = if let OptionalArg::Present(iterable) = iterable {
            iterable.try_to_value(vm)?
        } else {
            vec![]
        };

        // Re-check emptiness: the iterable itself may have been empty.
        if elements.is_empty() && cls.is(vm.ctx.types.tuple_type) {
            return Ok(vm.ctx.empty_tuple.clone().into());
        }

        let payload = Self::py_new(&cls, elements, vm)?;
        payload.into_ref_with_type(vm, cls).map(Into::into)
    }

    /// Build the payload; the class is irrelevant here (the payload is the
    /// same for subclasses — only the type slot on the object differs).
    fn py_new(_cls: &Py<PyType>, elements: Self::Args, _vm: &VirtualMachine) -> PyResult<Self> {
        Ok(Self {
            elements: elements.into_boxed_slice(),
        })
    }
}
260
261impl<R> AsRef<[R]> for PyTuple<R> {
262 fn as_ref(&self) -> &[R] {
263 &self.elements
264 }
265}
266
267impl<R> core::ops::Deref for PyTuple<R> {
268 type Target = [R];
269
270 fn deref(&self) -> &[R] {
271 &self.elements
272 }
273}
274
275impl<'a, R> core::iter::IntoIterator for &'a PyTuple<R> {
276 type Item = &'a R;
277 type IntoIter = core::slice::Iter<'a, R>;
278
279 fn into_iter(self) -> Self::IntoIter {
280 self.iter()
281 }
282}
283
284impl<'a, R> core::iter::IntoIterator for &'a Py<PyTuple<R>> {
285 type Item = &'a R;
286 type IntoIter = core::slice::Iter<'a, R>;
287
288 fn into_iter(self) -> Self::IntoIter {
289 self.iter()
290 }
291}
292
293impl<R> PyTuple<R> {
294 pub const fn as_slice(&self) -> &[R] {
295 &self.elements
296 }
297
298 #[inline]
299 pub fn len(&self) -> usize {
300 self.elements.len()
301 }
302
303 #[inline]
304 pub fn is_empty(&self) -> bool {
305 self.elements.is_empty()
306 }
307
308 #[inline]
309 pub fn iter(&self) -> core::slice::Iter<'_, R> {
310 self.elements.iter()
311 }
312}
313
impl PyTuple<PyObjectRef> {
    /// Allocate a new tuple object, reusing the interned empty tuple for
    /// empty input.
    pub fn new_ref(elements: Vec<PyObjectRef>, ctx: &Context) -> PyRef<Self> {
        if elements.is_empty() {
            ctx.empty_tuple.clone()
        } else {
            let elements = elements.into_boxed_slice();
            PyRef::new_ref(Self { elements }, ctx.types.tuple_type.to_owned(), None)
        }
    }

    /// Build a payload without the empty-tuple interning check. Callers
    /// (e.g. Context setup) are responsible for upholding the convention
    /// that empty tuples are normally shared.
    pub const fn new_unchecked(elements: Box<[PyObjectRef]>) -> Self {
        Self { elements }
    }

    /// `tuple * n`. Empty result and identity cases avoid allocation;
    /// the identity shortcut only applies to exact tuples, since a
    /// subclass instance must not be returned from `subclass * 1`.
    fn repeat(zelf: PyRef<Self>, value: isize, vm: &VirtualMachine) -> PyResult<PyRef<Self>> {
        Ok(if zelf.elements.is_empty() || value == 0 {
            vm.ctx.empty_tuple.clone()
        } else if value == 1 && zelf.class().is(vm.ctx.types.tuple_type) {
            zelf
        } else {
            // SequenceExt::mul handles negative counts and overflow checks.
            let v = zelf.elements.mul(vm, value)?;
            let elements = v.into_boxed_slice();
            Self { elements }.into_ref(&vm.ctx)
        })
    }

    /// Destructure into a Rust tuple of `TryFromObject` values
    /// (arity-checked; see [`FromPyTuple`]).
    pub fn extract_tuple<'a, T: FromPyTuple<'a>>(&'a self, vm: &VirtualMachine) -> PyResult<T> {
        T::from_pytuple(self, vm)
    }
}
352
impl<T> PyTuple<PyRef<T>> {
    /// Build a statically typed tuple from homogeneous elements.
    pub fn new_ref_typed(elements: Vec<PyRef<T>>, ctx: &Context) -> PyRef<Self> {
        // SAFETY: PyRef<T> is a repr-transparent wrapper over the same
        // pointer as PyObjectRef (the TransmuteFromObject contract), so
        // both the element Vec and the final PyRef share identical layout;
        // we only change the static type, never the bits.
        unsafe {
            let elements: Vec<PyObjectRef> =
                core::mem::transmute::<Vec<PyRef<T>>, Vec<PyObjectRef>>(elements);
            let tuple = PyTuple::<PyObjectRef>::new_ref(elements, ctx);
            core::mem::transmute::<PyRef<PyTuple>, PyRef<Self>>(tuple)
        }
    }
}
364
#[pyclass(
    itemsize = core::mem::size_of::<crate::PyObjectRef>(),
    flags(BASETYPE, SEQUENCE, _MATCH_SELF),
    with(AsMapping, AsNumber, AsSequence, Hashable, Comparable, Iterable, Constructor, Representable)
)]
impl PyTuple {
    /// `tuple + tuple`. Returns NotImplemented for non-tuple operands.
    /// Reuses an operand unchanged when the other side is empty — but
    /// only if the reused side is an exact tuple, never a subclass.
    // NOTE(review): no #[pymethod] here or on other dunders — presumably
    // the pyclass macro auto-exports dunder-named methods; confirm.
    fn __add__(
        zelf: PyRef<Self>,
        other: PyObjectRef,
        vm: &VirtualMachine,
    ) -> PyArithmeticValue<PyRef<Self>> {
        let added = other.downcast::<Self>().map(|other| {
            if other.elements.is_empty() && zelf.class().is(vm.ctx.types.tuple_type) {
                zelf
            } else if zelf.elements.is_empty() && other.class().is(vm.ctx.types.tuple_type) {
                other
            } else {
                let elements = zelf
                    .iter()
                    .chain(other.as_slice())
                    .cloned()
                    .collect::<Box<[_]>>();
                Self { elements }.into_ref(&vm.ctx)
            }
        });
        PyArithmeticValue::from_option(added.ok())
    }

    /// `tuple.count(x)`: number of elements identical or equal to `x`.
    #[pymethod]
    fn count(&self, needle: PyObjectRef, vm: &VirtualMachine) -> PyResult<usize> {
        let mut count: usize = 0;
        for element in self {
            // identity check first avoids calling __eq__ when possible
            if vm.identical_or_equal(element, &needle)? {
                count += 1;
            }
        }
        Ok(count)
    }

    /// `len(tuple)`.
    #[inline]
    pub const fn __len__(&self) -> usize {
        self.elements.len()
    }

    /// `tuple * n` (delegates to `repeat`; `ArgSize` handles index coercion).
    fn __mul__(zelf: PyRef<Self>, value: ArgSize, vm: &VirtualMachine) -> PyResult<PyRef<Self>> {
        Self::repeat(zelf, value.into(), vm)
    }

    /// Shared subscript logic: integer index (with negative wrapping) or
    /// slice (producing a new tuple).
    fn _getitem(&self, needle: &PyObject, vm: &VirtualMachine) -> PyResult {
        match SequenceIndex::try_from_borrowed_object(vm, needle, "tuple")? {
            SequenceIndex::Int(i) => {
                let index = self
                    .elements
                    .wrap_index(i)
                    .ok_or_else(|| vm.new_index_error("tuple index out of range"))?;
                Ok(self.elements[index].clone())
            }
            SequenceIndex::Slice(slice) => self
                .elements
                .getitem_by_slice(vm, slice)
                .map(|x| vm.ctx.new_tuple(x).into()),
        }
    }

    /// `tuple[needle]`.
    fn __getitem__(&self, needle: PyObjectRef, vm: &VirtualMachine) -> PyResult {
        self._getitem(&needle, vm)
    }

    /// `tuple.index(x[, start[, stop]])`: first position of `x` within the
    /// (saturated) range, or ValueError if absent.
    #[pymethod]
    fn index(
        &self,
        needle: PyObjectRef,
        range: OptionalRangeArgs,
        vm: &VirtualMachine,
    ) -> PyResult<usize> {
        let (start, stop) = range.saturate(self.len(), vm)?;
        // take(stop) before skip(start) keeps `index` as the absolute position.
        for (index, element) in self.elements.iter().enumerate().take(stop).skip(start) {
            if vm.identical_or_equal(element, &needle)? {
                return Ok(index);
            }
        }
        Err(vm.new_value_error("tuple.index(x): x not in tuple"))
    }

    /// Shared membership logic for `in` / sq_contains.
    fn _contains(&self, needle: &PyObject, vm: &VirtualMachine) -> PyResult<bool> {
        for element in &self.elements {
            if vm.identical_or_equal(element, needle)? {
                return Ok(true);
            }
        }
        Ok(false)
    }

    /// `needle in tuple`.
    fn __contains__(&self, needle: PyObjectRef, vm: &VirtualMachine) -> PyResult<bool> {
        self._contains(&needle, vm)
    }

    /// Pickle support: arguments to rebuild the tuple via `__new__`.
    /// Subclass instances are downgraded to a plain tuple copy.
    #[pymethod]
    fn __getnewargs__(zelf: PyRef<Self>, vm: &VirtualMachine) -> (PyTupleRef,) {
        let tup_arg = if zelf.class().is(vm.ctx.types.tuple_type) {
            zelf
        } else {
            Self::new_ref(zelf.elements.clone().into_vec(), &vm.ctx)
        };
        (tup_arg,)
    }

    /// `tuple[int, str]` etc. — PEP 585 generic alias support.
    #[pyclassmethod]
    fn __class_getitem__(cls: PyTypeRef, args: PyObjectRef, vm: &VirtualMachine) -> PyGenericAlias {
        PyGenericAlias::from_args(cls, args, vm)
    }
}
480
/// Mapping protocol: only `len()` and subscription are supported.
impl AsMapping for PyTuple {
    fn as_mapping() -> &'static PyMappingMethods {
        static AS_MAPPING: LazyLock<PyMappingMethods> = LazyLock::new(|| PyMappingMethods {
            length: atomic_func!(|mapping, _vm| Ok(PyTuple::mapping_downcast(mapping).len())),
            subscript: atomic_func!(
                |mapping, needle, vm| PyTuple::mapping_downcast(mapping)._getitem(needle, vm)
            ),
            ..PyMappingMethods::NOT_IMPLEMENTED
        });
        &AS_MAPPING
    }
}
493
/// Sequence protocol: length, concat, repeat, indexed item, membership.
impl AsSequence for PyTuple {
    fn as_sequence() -> &'static PySequenceMethods {
        static AS_SEQUENCE: LazyLock<PySequenceMethods> = LazyLock::new(|| PySequenceMethods {
            length: atomic_func!(|seq, _vm| Ok(PyTuple::sequence_downcast(seq).__len__())),
            concat: atomic_func!(|seq, other, vm| {
                let zelf = PyTuple::sequence_downcast(seq);
                // __add__ returns NotImplemented for non-tuples; the
                // sequence slot must surface that as a TypeError instead.
                match PyTuple::__add__(zelf.to_owned(), other.to_owned(), vm) {
                    PyArithmeticValue::Implemented(tuple) => Ok(tuple.into()),
                    PyArithmeticValue::NotImplemented => Err(vm.new_type_error(format!(
                        "can only concatenate tuple (not '{}') to tuple",
                        other.class().name()
                    ))),
                }
            }),
            repeat: atomic_func!(|seq, n, vm| {
                let zelf = PyTuple::sequence_downcast(seq);
                PyTuple::repeat(zelf.to_owned(), n, vm).map(|x| x.into())
            }),
            item: atomic_func!(|seq, i, vm| {
                let zelf = PyTuple::sequence_downcast(seq);
                zelf.elements.getitem_by_index(vm, i)
            }),
            contains: atomic_func!(|seq, needle, vm| {
                let zelf = PyTuple::sequence_downcast(seq);
                zelf._contains(needle, vm)
            }),
            ..PySequenceMethods::NOT_IMPLEMENTED
        });
        &AS_SEQUENCE
    }
}
525
526impl AsNumber for PyTuple {
527 fn as_number() -> &'static PyNumberMethods {
528 static AS_NUMBER: PyNumberMethods = PyNumberMethods {
529 boolean: Some(|number, _vm| {
530 let zelf = number.obj.downcast_ref::<PyTuple>().unwrap();
531 Ok(!zelf.elements.is_empty())
532 }),
533 ..PyNumberMethods::NOT_IMPLEMENTED
534 };
535 &AS_NUMBER
536 }
537}
538
impl Hashable for PyTuple {
    /// Delegates to the shared xxHash-style combiner below (also used by
    /// other tuple-like types, hence the free function).
    #[inline]
    fn hash(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyHash> {
        tuple_hash(zelf.as_slice(), vm)
    }
}
545
impl Comparable for PyTuple {
    /// Rich comparison: lexicographic, element by element.
    fn cmp(
        zelf: &Py<Self>,
        other: &PyObject,
        op: PyComparisonOp,
        vm: &VirtualMachine,
    ) -> PyResult<PyComparisonValue> {
        // x == x / x != x short-circuit without touching elements.
        if let Some(res) = op.identical_optimization(zelf, other) {
            return Ok(res.into());
        }
        // Non-tuple operand -> NotImplemented (macro early-returns).
        let other = class_or_notimplemented!(Self, other);
        zelf.iter()
            .richcompare(other.iter(), op, vm)
            .map(PyComparisonValue::Implemented)
    }
}
562
impl Iterable for PyTuple {
    /// `iter(tuple)`: a fresh iterator starting at position 0, holding a
    /// strong reference to the tuple.
    fn iter(zelf: PyRef<Self>, vm: &VirtualMachine) -> PyResult {
        Ok(PyTupleIterator {
            internal: PyMutex::new(PositionIterInternal::new(zelf, 0)),
        }
        .into_pyobject(vm))
    }
}
571
impl Representable for PyTuple {
    /// `repr(tuple)`: "()", "(x,)" for one element (trailing comma), or
    /// "(a, b, ...)". A ReprGuard breaks recursion for self-referential
    /// tuples, yielding "(...)".
    #[inline]
    fn repr(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyStrRef> {
        let s = if zelf.is_empty() {
            vm.ctx.intern_str("()").to_owned()
        } else if let Some(_guard) = ReprGuard::enter(vm, zelf.as_object()) {
            let s = if zelf.len() == 1 {
                // Single element needs the trailing comma to round-trip.
                wtf8_concat!("(", zelf.elements[0].repr(vm)?.as_wtf8(), ",)")
            } else {
                collection_repr(None, "(", ")", zelf.elements.iter(), vm)?
            };
            vm.ctx.new_str(s)
        } else {
            // Already inside repr of this same object: cycle detected.
            vm.ctx.intern_str("(...)").to_owned()
        };
        Ok(s)
    }

    /// Not used: the String-returning variant would lose WTF-8 data;
    /// callers must go through `repr`.
    #[cold]
    fn repr_str(_zelf: &Py<Self>, _vm: &VirtualMachine) -> PyResult<String> {
        unreachable!("use repr instead")
    }
}
595
impl PyRef<PyTuple<PyObjectRef>> {
    /// Downcast to a homogeneous `PyTuple<PyRef<T>>`, verifying every
    /// element first; fails with the error produced by the check.
    pub fn try_into_typed<T: PyPayload>(
        self,
        vm: &VirtualMachine,
    ) -> PyResult<PyRef<PyTuple<PyRef<T>>>> {
        for elem in self.as_slice() {
            <PyRef<T> as TransmuteFromObject>::check(vm, elem)?;
        }
        // SAFETY: every element passed TransmuteFromObject::check, and
        // PyRef<T> has the same layout as PyObjectRef, so reinterpreting
        // the whole handle is sound.
        Ok(unsafe { core::mem::transmute::<Self, PyRef<PyTuple<PyRef<T>>>>(self) })
    }
}
609
impl<T: PyPayload> PyRef<PyTuple<PyRef<T>>> {
    /// Erase the element type (always safe: widening PyRef<T> -> PyObjectRef).
    pub fn into_untyped(self) -> PyRef<PyTuple> {
        // SAFETY: PyRef<T> and PyObjectRef share layout; erasing the
        // element type never invalidates the data.
        unsafe { core::mem::transmute::<Self, PyRef<PyTuple>>(self) }
    }
}
616
impl<T: PyPayload> Py<PyTuple<PyRef<T>>> {
    /// Borrowed counterpart of `into_untyped`: view as an untyped tuple.
    pub fn as_untyped(&self) -> &Py<PyTuple> {
        // SAFETY: same-layout reinterpretation of a shared reference;
        // no ownership or lifetime change.
        unsafe { core::mem::transmute::<&Self, &Py<PyTuple>>(self) }
    }
}
623
/// Allow typed tuples wherever a plain `PyTupleRef` is expected.
impl<T: PyPayload> From<PyRef<PyTuple<PyRef<T>>>> for PyTupleRef {
    #[inline]
    fn from(tup: PyRef<PyTuple<PyRef<T>>>) -> Self {
        tup.into_untyped()
    }
}
630
/// The `tuple_iterator` type: a strong reference to the tuple plus a
/// mutex-guarded cursor position.
#[pyclass(module = false, name = "tuple_iterator", traverse)]
#[derive(Debug)]
pub(crate) struct PyTupleIterator {
    internal: PyMutex<PositionIterInternal<PyTupleRef>>,
}
636
impl PyPayload for PyTupleIterator {
    fn class(ctx: &Context) -> &'static Py<PyType> {
        ctx.types.tuple_iterator_type
    }
}
642
#[pyclass(flags(DISALLOW_INSTANTIATION), with(IterNext, Iterable))]
impl PyTupleIterator {
    /// Remaining element count (0 once exhausted).
    #[pymethod]
    fn __length_hint__(&self) -> usize {
        self.internal.lock().length_hint(|obj| obj.len())
    }

    /// Unpickle support: restore the cursor, clamped to the tuple length.
    #[pymethod]
    fn __setstate__(&self, state: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> {
        self.internal
            .lock()
            .set_state(state, |obj, pos| pos.min(obj.len()), vm)
    }

    /// Pickle support: `(iter, (tuple,))` while active, or `(iter, ((),))`
    /// once exhausted.
    #[pymethod]
    fn __reduce__(&self, vm: &VirtualMachine) -> PyTupleRef {
        let func = builtins_iter(vm);
        self.internal.lock().reduce(
            func,
            |x| x.clone().into(),
            |vm| vm.ctx.empty_tuple.clone().into(),
            vm,
        )
    }
}
668
669impl PyTupleIterator {
670 pub(crate) fn fast_next(&self) -> Option<PyObjectRef> {
672 self.internal
673 .lock()
674 .next(|tuple, pos| {
675 Ok(PyIterReturn::from_result(
676 tuple.get(pos).cloned().ok_or(None),
677 ))
678 })
679 .ok()
680 .and_then(|r| match r {
681 PyIterReturn::Return(v) => Some(v),
682 PyIterReturn::StopIteration(_) => None,
683 })
684 }
685}
686
// `__iter__` on the iterator returns itself.
impl SelfIter for PyTupleIterator {}
impl IterNext for PyTupleIterator {
    /// `__next__`: clone the element at the cursor, or signal StopIteration
    /// (the `ok_or(None)` maps out-of-range to StopIteration with no value).
    fn next(zelf: &Py<Self>, _vm: &VirtualMachine) -> PyResult<PyIterReturn> {
        zelf.internal.lock().next(|tuple, pos| {
            Ok(PyIterReturn::from_result(
                tuple.get(pos).cloned().ok_or(None),
            ))
        })
    }
}
697
/// Vectorcall entry for the `tuple` type object: repack the flat argument
/// array into `FuncArgs` and dispatch to the type's `tp_new` slot.
fn vectorcall_tuple(
    zelf_obj: &PyObject,
    args: Vec<PyObjectRef>,
    nargs: usize,
    kwnames: Option<&[PyObjectRef]>,
    vm: &VirtualMachine,
) -> PyResult {
    // Installed only on tuple_type (see init), so the downcast holds.
    let zelf: &Py<PyType> = zelf_obj.downcast_ref().unwrap();
    let func_args = FuncArgs::from_vectorcall_owned(args, nargs, kwnames);
    // unwrap: extend_class populates the `new` slot before this vectorcall
    // is stored — NOTE(review): relies on init ordering; confirm.
    (zelf.slots.new.load().unwrap())(zelf.to_owned(), func_args, vm)
}
711
/// Register `tuple` and `tuple_iterator` on their type objects and install
/// the vectorcall fast path for `tuple(...)` calls.
pub(crate) fn init(context: &'static Context) {
    PyTuple::extend_class(context, context.types.tuple_type);
    PyTupleIterator::extend_class(context, context.types.tuple_iterator_type);
    context
        .types
        .tuple_type
        .slots
        .vectorcall
        .store(Some(vectorcall_tuple));
}
722
/// Combine element hashes into a tuple hash.
///
/// NOTE(review): the constants and structure follow the xxHash-style
/// scheme CPython uses for tuples (xxh64 primes on 64-bit targets, xxh32
/// primes and a 13-bit rotate on 32-bit) — keeping them in sync preserves
/// hash compatibility; confirm against CPython's `tuple_hash`.
pub(super) fn tuple_hash(elements: &[PyObjectRef], vm: &VirtualMachine) -> PyResult<PyHash> {
    #[cfg(target_pointer_width = "64")]
    const PRIME1: PyUHash = 11400714785074694791;
    #[cfg(target_pointer_width = "64")]
    const PRIME2: PyUHash = 14029467366897019727;
    #[cfg(target_pointer_width = "64")]
    const PRIME5: PyUHash = 2870177450012600261;
    #[cfg(target_pointer_width = "64")]
    const ROTATE: u32 = 31;

    #[cfg(target_pointer_width = "32")]
    const PRIME1: PyUHash = 2654435761;
    #[cfg(target_pointer_width = "32")]
    const PRIME2: PyUHash = 2246822519;
    #[cfg(target_pointer_width = "32")]
    const PRIME5: PyUHash = 374761393;
    #[cfg(target_pointer_width = "32")]
    const ROTATE: u32 = 13;

    let mut acc = PRIME5;
    let len = elements.len() as PyUHash;

    // One mix round per element; `val.hash(vm)` may call Python `__hash__`
    // and therefore can fail, hence the `?`.
    for val in elements {
        let lane = val.hash(vm)? as PyUHash;
        acc = acc.wrapping_add(lane.wrapping_mul(PRIME2));
        acc = acc.rotate_left(ROTATE);
        acc = acc.wrapping_mul(PRIME1);
    }

    // Fold in the length so prefixes hash differently.
    acc = acc.wrapping_add(len ^ (PRIME5 ^ 3527539));

    // -1 is reserved as the error sentinel in the hash protocol; remap it.
    if acc as PyHash == -1 {
        return Ok(1546275796);
    }
    Ok(acc as PyHash)
}