1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
use oxc_ast_macros::{ast, CloneIn};

/// Base of a number literal, as written in source text.
///
/// `Float` and `Decimal` are both treated as base 10 (see [`NumberBase::is_base_10`]).
#[ast]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, CloneIn)]
pub enum NumberBase {
    /// Base-10 literal with a non-integer value — presumably one with a
    /// fraction/exponent part; TODO confirm against the parser that sets this.
    Float = 0,
    /// Base-10 integer literal.
    Decimal = 1,
    /// Base-2 literal.
    Binary = 2,
    /// Base-8 literal.
    Octal = 3,
    /// Base-16 literal.
    Hex = 4,
}

impl NumberBase {
    /// Returns `true` for the bases written in decimal notation
    /// ([`Self::Float`] and [`Self::Decimal`]).
    pub fn is_base_10(&self) -> bool {
        // Exhaustive match: adding a new variant forces this to be revisited.
        match self {
            Self::Float | Self::Decimal => true,
            Self::Binary | Self::Octal | Self::Hex => false,
        }
    }
}

/// Base of a bigint literal, as written in source text.
///
/// Unlike [`NumberBase`] there is no `Float` variant, since bigint
/// literals cannot have a fractional part.
#[ast]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, CloneIn)]
pub enum BigintBase {
    /// Base-10 literal.
    Decimal = 0,
    /// Base-2 literal.
    Binary = 1,
    /// Base-8 literal.
    Octal = 2,
    /// Base-16 literal.
    Hex = 3,
}

impl BigintBase {
    /// Returns `true` when the bigint literal is written in decimal notation.
    pub fn is_base_10(&self) -> bool {
        // `matches!` for consistency with `NumberBase::is_base_10` above;
        // behavior is identical to the previous `self == &Self::Decimal`.
        matches!(self, Self::Decimal)
    }
}

/// Conversion to a string following the ECMAScript `Number::toString` algorithm.
///
/// <https://tc39.es/ecma262/#sec-numeric-types-number-tostring>
#[cfg(feature = "to_js_string")]
pub trait ToJsString {
    /// Formats `self` the way JavaScript's `Number::toString` would.
    fn to_js_string(&self) -> String;
}

#[cfg(feature = "to_js_string")]
impl ToJsString for f64 {
    /// Formats this `f64` per the ECMAScript number-to-string algorithm,
    /// delegating to the `ryu_js` formatter.
    fn to_js_string(&self) -> String {
        // The `&str` returned by `format` borrows from the temporary buffer,
        // which lives until the end of this expression — long enough for
        // `to_string` to copy it out.
        ryu_js::Buffer::new().format(*self).to_string()
    }
}

/// Converts a 64-bit floating point number to an `i32` according to the [`ToInt32`][ToInt32] algorithm.
///
/// [ToInt32]: https://tc39.es/ecma262/#sec-toint32
///
/// This is copied from [Boa](https://github.com/boa-dev/boa/blob/61567687cf4bfeca6bd548c3e72b6965e74b2461/core/engine/src/builtins/number/conversions.rs)
pub trait ToJsInt32 {
    /// Converts `self` to an `i32` using the ECMAScript `ToInt32` algorithm.
    fn to_js_int_32(&self) -> i32;
}

impl ToJsInt32 for f64 {
    #[allow(clippy::float_cmp, clippy::cast_possible_truncation, clippy::cast_possible_wrap)]
    fn to_js_int_32(&self) -> i32 {
        // IEEE-754 binary64 bit-layout masks.
        const SIGN_MASK: u64 = 0x8000_0000_0000_0000;
        const EXPONENT_MASK: u64 = 0x7FF0_0000_0000_0000;
        const SIGNIFICAND_MASK: u64 = 0x000F_FFFF_FFFF_FFFF;
        const HIDDEN_BIT: u64 = 0x0010_0000_0000_0000;
        const PHYSICAL_SIGNIFICAND_SIZE: i32 = 52; // Excludes the hidden bit.
        const SIGNIFICAND_SIZE: i32 = 53;

        const EXPONENT_BIAS: i32 = 0x3FF + PHYSICAL_SIGNIFICAND_SIZE;
        const DENORMAL_EXPONENT: i32 = -EXPONENT_BIAS + 1;

        // An all-zero exponent field marks a subnormal (denormal) double.
        fn is_subnormal(value: f64) -> bool {
            value.to_bits() & EXPONENT_MASK == 0
        }

        // Unbiased binary exponent of `value`, scaled so that
        // `value == significand * 2^exponent`.
        fn unbiased_exponent(value: f64) -> i32 {
            if is_subnormal(value) {
                DENORMAL_EXPONENT
            } else {
                let raw = (value.to_bits() & EXPONENT_MASK) >> PHYSICAL_SIGNIFICAND_SIZE;
                raw as i32 - EXPONENT_BIAS
            }
        }

        // 53-bit significand, with the implicit leading bit restored for
        // normal numbers (subnormals have no hidden bit).
        fn full_significand(value: f64) -> u64 {
            let fraction = value.to_bits() & SIGNIFICAND_MASK;
            if is_subnormal(value) {
                fraction
            } else {
                fraction + HIDDEN_BIT
            }
        }

        // `+1` or `-1` according to the IEEE-754 sign bit.
        fn sign_factor(value: f64) -> i64 {
            if value.to_bits() & SIGN_MASK == 0 { 1 } else { -1 }
        }

        let value = *self;

        // Fast path: the value already round-trips through `i32` exactly.
        if value.is_finite() && (f64::from(i32::MIN)..=f64::from(i32::MAX)).contains(&value) {
            let truncated = value as i32;
            if f64::from(truncated) == value {
                return truncated;
            }
        }

        // Slow path: extract the low 32 bits of the integer part directly
        // from the bit representation, then re-apply the sign modulo 2^32.
        let exponent = unbiased_exponent(value);
        let low_bits = if exponent < 0 {
            // Magnitudes below 2^-53 of the significand scale vanish entirely.
            if exponent <= -SIGNIFICAND_SIZE {
                return 0;
            }
            full_significand(value) >> -exponent
        } else {
            // Every significand bit lands above bit 31; this also covers the
            // NaN/infinity exponent, which maps far past 31 here.
            if exponent > 31 {
                return 0;
            }
            (full_significand(value) << exponent) & 0xFFFF_FFFF
        };

        // Wrapping cast implements the final "modulo 2^32, reinterpret signed" step.
        (sign_factor(value) * (low_bits as i64)) as i32
    }
}