core/num/f128.rs
1//! Constants for the `f128` quadruple-precision floating point type.
2//!
3//! *[See also the `f128` primitive type][f128].*
4//!
5//! Mathematically significant numbers are provided in the `consts` sub-module.
6//!
7//! For the constants defined directly in this module
8//! (as distinct from those defined in the `consts` sub-module),
9//! new code should instead use the associated constants
10//! defined directly on the `f128` type.
11
12#![unstable(feature = "f128", issue = "116909")]
13
14use crate::convert::FloatToInt;
15use crate::num::FpCategory;
16use crate::panic::const_assert;
17use crate::{intrinsics, mem};
18
19/// Basic mathematical constants.
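///
/// An illustrative usage sketch (gated to x86_64 Linux like the other `f128` doctests in this
/// file, since `f128` arithmetic is not yet reliable on every target):
///
/// ```
/// #![feature(f128)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// use std::f128::consts;
///
/// // τ is defined as 2π; doubling is exact in binary floating point, so this holds exactly.
/// assert_eq!(consts::TAU, 2.0 * consts::PI);
/// # }
/// ```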
20#[unstable(feature = "f128", issue = "116909")]
21pub mod consts {
22 // FIXME: replace with mathematical constants from cmath.
23
24 /// Archimedes' constant (π)
25 #[unstable(feature = "f128", issue = "116909")]
26 pub const PI: f128 = 3.14159265358979323846264338327950288419716939937510582097494_f128;
27
28 /// The full circle constant (τ)
29 ///
30 /// Equal to 2π.
31 #[unstable(feature = "f128", issue = "116909")]
32 pub const TAU: f128 = 6.28318530717958647692528676655900576839433879875021164194989_f128;
33
34 /// The golden ratio (φ)
35 #[unstable(feature = "f128", issue = "116909")]
36 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
37 pub const PHI: f128 = 1.61803398874989484820458683436563811772030917980576286213545_f128;
38
39 /// The Euler-Mascheroni constant (γ)
40 #[unstable(feature = "f128", issue = "116909")]
41 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
42 pub const EGAMMA: f128 = 0.577215664901532860606512090082402431042159335939923598805767_f128;
43
44 /// π/2
45 #[unstable(feature = "f128", issue = "116909")]
46 pub const FRAC_PI_2: f128 = 1.57079632679489661923132169163975144209858469968755291048747_f128;
47
48 /// π/3
49 #[unstable(feature = "f128", issue = "116909")]
50 pub const FRAC_PI_3: f128 = 1.04719755119659774615421446109316762806572313312503527365831_f128;
51
52 /// π/4
53 #[unstable(feature = "f128", issue = "116909")]
54 pub const FRAC_PI_4: f128 = 0.785398163397448309615660845819875721049292349843776455243736_f128;
55
56 /// π/6
57 #[unstable(feature = "f128", issue = "116909")]
58 pub const FRAC_PI_6: f128 = 0.523598775598298873077107230546583814032861566562517636829157_f128;
59
60 /// π/8
61 #[unstable(feature = "f128", issue = "116909")]
62 pub const FRAC_PI_8: f128 = 0.392699081698724154807830422909937860524646174921888227621868_f128;
63
64 /// 1/π
65 #[unstable(feature = "f128", issue = "116909")]
66 pub const FRAC_1_PI: f128 = 0.318309886183790671537767526745028724068919291480912897495335_f128;
67
68 /// 1/sqrt(π)
69 #[unstable(feature = "f128", issue = "116909")]
70 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
71 pub const FRAC_1_SQRT_PI: f128 =
72 0.564189583547756286948079451560772585844050629328998856844086_f128;
73
74 /// 1/sqrt(2π)
75 #[doc(alias = "FRAC_1_SQRT_TAU")]
76 #[unstable(feature = "f128", issue = "116909")]
77 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
78 pub const FRAC_1_SQRT_2PI: f128 =
79 0.398942280401432677939946059934381868475858631164934657665926_f128;
80
81 /// 2/π
82 #[unstable(feature = "f128", issue = "116909")]
83 pub const FRAC_2_PI: f128 = 0.636619772367581343075535053490057448137838582961825794990669_f128;
84
85 /// 2/sqrt(π)
86 #[unstable(feature = "f128", issue = "116909")]
87 pub const FRAC_2_SQRT_PI: f128 =
88 1.12837916709551257389615890312154517168810125865799771368817_f128;
89
90 /// sqrt(2)
91 #[unstable(feature = "f128", issue = "116909")]
92 pub const SQRT_2: f128 = 1.41421356237309504880168872420969807856967187537694807317668_f128;
93
94 /// 1/sqrt(2)
95 #[unstable(feature = "f128", issue = "116909")]
96 pub const FRAC_1_SQRT_2: f128 =
97 0.707106781186547524400844362104849039284835937688474036588340_f128;
98
99 /// sqrt(3)
100 #[unstable(feature = "f128", issue = "116909")]
101 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
102 pub const SQRT_3: f128 = 1.73205080756887729352744634150587236694280525381038062805581_f128;
103
104 /// 1/sqrt(3)
105 #[unstable(feature = "f128", issue = "116909")]
106 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
107 pub const FRAC_1_SQRT_3: f128 =
108 0.577350269189625764509148780501957455647601751270126876018602_f128;
109
110 /// Euler's number (e)
111 #[unstable(feature = "f128", issue = "116909")]
112 pub const E: f128 = 2.71828182845904523536028747135266249775724709369995957496697_f128;
113
114 /// log<sub>2</sub>(10)
115 #[unstable(feature = "f128", issue = "116909")]
116 pub const LOG2_10: f128 = 3.32192809488736234787031942948939017586483139302458061205476_f128;
117
118 /// log<sub>2</sub>(e)
119 #[unstable(feature = "f128", issue = "116909")]
120 pub const LOG2_E: f128 = 1.44269504088896340735992468100189213742664595415298593413545_f128;
121
122 /// log<sub>10</sub>(2)
123 #[unstable(feature = "f128", issue = "116909")]
124 pub const LOG10_2: f128 = 0.301029995663981195213738894724493026768189881462108541310427_f128;
125
126 /// log<sub>10</sub>(e)
127 #[unstable(feature = "f128", issue = "116909")]
128 pub const LOG10_E: f128 = 0.434294481903251827651128918916605082294397005803666566114454_f128;
129
130 /// ln(2)
131 #[unstable(feature = "f128", issue = "116909")]
132 pub const LN_2: f128 = 0.693147180559945309417232121458176568075500134360255254120680_f128;
133
134 /// ln(10)
135 #[unstable(feature = "f128", issue = "116909")]
136 pub const LN_10: f128 = 2.30258509299404568401799145468436420760110148862877297603333_f128;
137}
138
139impl f128 {
140 // FIXME(f16_f128): almost all methods in this `impl` are missing examples and a const
141 // implementation. Add these once we can run code on all platforms and have f16/f128 in CTFE.
142
143 /// The radix or base of the internal representation of `f128`.
144 #[unstable(feature = "f128", issue = "116909")]
145 pub const RADIX: u32 = 2;
146
147 /// Number of significant digits in base 2.
148 ///
149 /// Note that the size of the mantissa in the bitwise representation is one
150 /// smaller than this since the leading 1 is not stored explicitly.
151 #[unstable(feature = "f128", issue = "116909")]
152 pub const MANTISSA_DIGITS: u32 = 113;
153
154 /// Approximate number of significant digits in base 10.
155 ///
156 /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
157 /// significant digits can be converted to `f128` and back without loss.
158 ///
159 /// Equal to floor(log<sub>10</sub> 2<sup>[`MANTISSA_DIGITS`] − 1</sup>).
160 ///
161 /// [`MANTISSA_DIGITS`]: f128::MANTISSA_DIGITS
162 #[unstable(feature = "f128", issue = "116909")]
163 pub const DIGITS: u32 = 33;
164
165 /// [Machine epsilon] value for `f128`.
166 ///
167 /// This is the difference between `1.0` and the next larger representable number.
168 ///
169 /// Equal to 2<sup>1 − [`MANTISSA_DIGITS`]</sup>.
170 ///
171 /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
172 /// [`MANTISSA_DIGITS`]: f128::MANTISSA_DIGITS
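    ///
    /// An illustrative sketch of the relationship above (gated like the other `f128` doctests
    /// in this file):
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// // The next representable value above 1.0 differs from 1.0 by exactly `EPSILON`.
    /// assert_eq!(1.0f128.next_up() - 1.0, f128::EPSILON);
    /// # }
    /// ```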
173 #[unstable(feature = "f128", issue = "116909")]
174 #[rustc_diagnostic_item = "f128_epsilon"]
175 pub const EPSILON: f128 = 1.92592994438723585305597794258492732e-34_f128;
176
177 /// Smallest finite `f128` value.
178 ///
179 /// Equal to −[`MAX`].
180 ///
181 /// [`MAX`]: f128::MAX
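    ///
    /// A sketch of the identity above (gated like the other `f128` doctests in this file):
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// assert_eq!(f128::MIN, -f128::MAX);
    /// # }
    /// ```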
182 #[unstable(feature = "f128", issue = "116909")]
183 pub const MIN: f128 = -1.18973149535723176508575932662800702e+4932_f128;
184 /// Smallest positive normal `f128` value.
185 ///
186 /// Equal to 2<sup>[`MIN_EXP`] − 1</sup>.
187 ///
188 /// [`MIN_EXP`]: f128::MIN_EXP
189 #[unstable(feature = "f128", issue = "116909")]
190 pub const MIN_POSITIVE: f128 = 3.36210314311209350626267781732175260e-4932_f128;
191 /// Largest finite `f128` value.
192 ///
193 /// Equal to
194 /// (1 − 2<sup>−[`MANTISSA_DIGITS`]</sup>) 2<sup>[`MAX_EXP`]</sup>.
195 ///
196 /// [`MANTISSA_DIGITS`]: f128::MANTISSA_DIGITS
197 /// [`MAX_EXP`]: f128::MAX_EXP
198 #[unstable(feature = "f128", issue = "116909")]
199 pub const MAX: f128 = 1.18973149535723176508575932662800702e+4932_f128;
200
201 /// One greater than the minimum possible *normal* power of 2 exponent
202 /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
203 ///
204 /// This corresponds to the exact minimum possible *normal* power of 2 exponent
205 /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
206 /// In other words, all normal numbers representable by this type are
207 /// greater than or equal to 0.5 × 2<sup><i>MIN_EXP</i></sup>.
208 #[unstable(feature = "f128", issue = "116909")]
209 pub const MIN_EXP: i32 = -16_381;
210 /// One greater than the maximum possible power of 2 exponent
211 /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
212 ///
213 /// This corresponds to the exact maximum possible power of 2 exponent
214 /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
215 /// In other words, all numbers representable by this type are
216 /// strictly less than 2<sup><i>MAX_EXP</i></sup>.
217 #[unstable(feature = "f128", issue = "116909")]
218 pub const MAX_EXP: i32 = 16_384;
219
220 /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
221 ///
222 /// Equal to ceil(log<sub>10</sub> [`MIN_POSITIVE`]).
223 ///
224 /// [`MIN_POSITIVE`]: f128::MIN_POSITIVE
225 #[unstable(feature = "f128", issue = "116909")]
226 pub const MIN_10_EXP: i32 = -4_931;
227 /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
228 ///
229 /// Equal to floor(log<sub>10</sub> [`MAX`]).
230 ///
231 /// [`MAX`]: f128::MAX
232 #[unstable(feature = "f128", issue = "116909")]
233 pub const MAX_10_EXP: i32 = 4_932;
234
235 /// Not a Number (NaN).
236 ///
237 /// Note that IEEE 754 doesn't define just a single NaN value; a plethora of bit patterns are
238 /// considered to be NaN. Furthermore, the standard distinguishes between a "signaling" and
239 /// a "quiet" NaN, and allows inspecting its "payload" (the unspecified bits in the bit pattern)
240 /// and its sign. See the [specification of NaN bit patterns](f32#nan-bit-patterns) for more
241 /// info.
242 ///
243 /// This constant is guaranteed to be a quiet NaN (on targets that follow the Rust assumption
244 /// that the quiet/signaling bit being set to 1 indicates a quiet NaN). Beyond that, nothing is
245 /// guaranteed about the specific bit pattern chosen here: both payload and sign are arbitrary.
246 /// The concrete bit pattern may change across Rust versions and target platforms.
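    ///
    /// A small illustration (gated like the other `f128` doctests in this file):
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// assert!(f128::NAN.is_nan());
    /// // NaN is never equal to anything, including itself.
    /// assert!(f128::NAN != f128::NAN);
    /// # }
    /// ```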
247 #[allow(clippy::eq_op)]
248 #[rustc_diagnostic_item = "f128_nan"]
249 #[unstable(feature = "f128", issue = "116909")]
250 pub const NAN: f128 = 0.0_f128 / 0.0_f128;
251
252 /// Infinity (∞).
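    ///
    /// A small illustration (gated like the other `f128` doctests in this file):
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// assert!(f128::INFINITY > f128::MAX);
    /// assert!(f128::INFINITY.is_infinite());
    /// assert!(!f128::INFINITY.is_finite());
    /// # }
    /// ```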
253 #[unstable(feature = "f128", issue = "116909")]
254 pub const INFINITY: f128 = 1.0_f128 / 0.0_f128;
255
256 /// Negative infinity (−∞).
257 #[unstable(feature = "f128", issue = "116909")]
258 pub const NEG_INFINITY: f128 = -1.0_f128 / 0.0_f128;
259
260 /// Sign bit
261 pub(crate) const SIGN_MASK: u128 = 0x8000_0000_0000_0000_0000_0000_0000_0000;
262
263 /// Exponent mask
264 pub(crate) const EXP_MASK: u128 = 0x7fff_0000_0000_0000_0000_0000_0000_0000;
265
266 /// Mantissa mask
267 pub(crate) const MAN_MASK: u128 = 0x0000_ffff_ffff_ffff_ffff_ffff_ffff_ffff;
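    // Note: the three masks above partition the 128-bit encoding into 1 sign bit,
    // 15 exponent bits, and 112 explicitly stored mantissa bits (the leading mantissa
    // bit of a normal number is implicit), matching the IEEE 754 binary128 layout.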
268
269 /// Minimum representable positive value (min subnormal)
270 const TINY_BITS: u128 = 0x1;
271
272 /// Minimum representable negative value (min negative subnormal)
273 const NEG_TINY_BITS: u128 = Self::TINY_BITS | Self::SIGN_MASK;
274
275 /// Returns `true` if this value is NaN.
276 ///
277 /// ```
278 /// #![feature(f128)]
279 /// # // FIXME(f16_f128): remove when `unordtf2` is available
280 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
281 ///
282 /// let nan = f128::NAN;
283 /// let f = 7.0_f128;
284 ///
285 /// assert!(nan.is_nan());
286 /// assert!(!f.is_nan());
287 /// # }
288 /// ```
289 #[inline]
290 #[must_use]
291 #[unstable(feature = "f128", issue = "116909")]
292 #[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
293 pub const fn is_nan(self) -> bool {
294 self != self
295 }
296
297 /// Returns `true` if this value is positive infinity or negative infinity, and
298 /// `false` otherwise.
299 ///
300 /// ```
301 /// #![feature(f128)]
302 /// # // FIXME(f16_f128): remove when `eqtf2` is available
303 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
304 ///
305 /// let f = 7.0f128;
306 /// let inf = f128::INFINITY;
307 /// let neg_inf = f128::NEG_INFINITY;
308 /// let nan = f128::NAN;
309 ///
310 /// assert!(!f.is_infinite());
311 /// assert!(!nan.is_infinite());
312 ///
313 /// assert!(inf.is_infinite());
314 /// assert!(neg_inf.is_infinite());
315 /// # }
316 /// ```
317 #[inline]
318 #[must_use]
319 #[unstable(feature = "f128", issue = "116909")]
320 pub const fn is_infinite(self) -> bool {
321 (self == f128::INFINITY) | (self == f128::NEG_INFINITY)
322 }
323
324 /// Returns `true` if this number is neither infinite nor NaN.
325 ///
326 /// ```
327 /// #![feature(f128)]
328 /// # // FIXME(f16_f128): remove when `lttf2` is available
329 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
330 ///
331 /// let f = 7.0f128;
332 /// let inf: f128 = f128::INFINITY;
333 /// let neg_inf: f128 = f128::NEG_INFINITY;
334 /// let nan: f128 = f128::NAN;
335 ///
336 /// assert!(f.is_finite());
337 ///
338 /// assert!(!nan.is_finite());
339 /// assert!(!inf.is_finite());
340 /// assert!(!neg_inf.is_finite());
341 /// # }
342 /// ```
343 #[inline]
344 #[must_use]
345 #[unstable(feature = "f128", issue = "116909")]
346 #[rustc_const_unstable(feature = "f128", issue = "116909")]
347 pub const fn is_finite(self) -> bool {
348 // There's no need to handle NaN separately: if self is NaN,
349 // the comparison is not true, exactly as desired.
350 self.abs() < Self::INFINITY
351 }
352
353 /// Returns `true` if the number is [subnormal].
354 ///
355 /// ```
356 /// #![feature(f128)]
357 /// # // FIXME(f16_f128): remove when `eqtf2` is available
358 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
359 ///
360 /// let min = f128::MIN_POSITIVE; // 3.362103143e-4932f128
361 /// let max = f128::MAX;
362 /// let lower_than_min = 1.0e-4960_f128;
363 /// let zero = 0.0_f128;
364 ///
365 /// assert!(!min.is_subnormal());
366 /// assert!(!max.is_subnormal());
367 ///
368 /// assert!(!zero.is_subnormal());
369 /// assert!(!f128::NAN.is_subnormal());
370 /// assert!(!f128::INFINITY.is_subnormal());
371 /// // Values between `0` and `min` are Subnormal.
372 /// assert!(lower_than_min.is_subnormal());
373 /// # }
374 /// ```
375 ///
376 /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
377 #[inline]
378 #[must_use]
379 #[unstable(feature = "f128", issue = "116909")]
380 pub const fn is_subnormal(self) -> bool {
381 matches!(self.classify(), FpCategory::Subnormal)
382 }
383
384 /// Returns `true` if the number is neither zero, infinite, [subnormal], nor NaN.
385 ///
386 /// ```
387 /// #![feature(f128)]
388 /// # // FIXME(f16_f128): remove when `eqtf2` is available
389 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
390 ///
391 /// let min = f128::MIN_POSITIVE; // 3.362103143e-4932f128
392 /// let max = f128::MAX;
393 /// let lower_than_min = 1.0e-4960_f128;
394 /// let zero = 0.0_f128;
395 ///
396 /// assert!(min.is_normal());
397 /// assert!(max.is_normal());
398 ///
399 /// assert!(!zero.is_normal());
400 /// assert!(!f128::NAN.is_normal());
401 /// assert!(!f128::INFINITY.is_normal());
402 /// // Values between `0` and `min` are Subnormal.
403 /// assert!(!lower_than_min.is_normal());
404 /// # }
405 /// ```
406 ///
407 /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
408 #[inline]
409 #[must_use]
410 #[unstable(feature = "f128", issue = "116909")]
411 pub const fn is_normal(self) -> bool {
412 matches!(self.classify(), FpCategory::Normal)
413 }
414
415 /// Returns the floating point category of the number. If only one property
416 /// is going to be tested, it is generally faster to use the specific
417 /// predicate instead.
418 ///
419 /// ```
420 /// #![feature(f128)]
421 /// # // FIXME(f16_f128): remove when `eqtf2` is available
422 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
423 ///
424 /// use std::num::FpCategory;
425 ///
426 /// let num = 12.4_f128;
427 /// let inf = f128::INFINITY;
428 ///
429 /// assert_eq!(num.classify(), FpCategory::Normal);
430 /// assert_eq!(inf.classify(), FpCategory::Infinite);
431 /// # }
432 /// ```
433 #[inline]
434 #[unstable(feature = "f128", issue = "116909")]
435 pub const fn classify(self) -> FpCategory {
436 let bits = self.to_bits();
437 match (bits & Self::MAN_MASK, bits & Self::EXP_MASK) {
438 (0, Self::EXP_MASK) => FpCategory::Infinite,
439 (_, Self::EXP_MASK) => FpCategory::Nan,
440 (0, 0) => FpCategory::Zero,
441 (_, 0) => FpCategory::Subnormal,
442 _ => FpCategory::Normal,
443 }
444 }
445
446 /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
447 /// positive sign bit and positive infinity.
448 ///
449 /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
450 /// a NaN, and as Rust doesn't guarantee that the bit patterns of NaNs are
451 /// conserved over arithmetic operations, the result of `is_sign_positive` on
452 /// a NaN might produce an unexpected or non-portable result. See the [specification
453 /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == 1.0`
454 /// if you need fully portable behavior (will return `false` for all NaNs).
455 ///
456 /// ```
457 /// #![feature(f128)]
458 ///
459 /// let f = 7.0_f128;
460 /// let g = -7.0_f128;
461 ///
462 /// assert!(f.is_sign_positive());
463 /// assert!(!g.is_sign_positive());
464 /// ```
465 #[inline]
466 #[must_use]
467 #[unstable(feature = "f128", issue = "116909")]
468 pub const fn is_sign_positive(self) -> bool {
469 !self.is_sign_negative()
470 }
471
472 /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
473 /// negative sign bit and negative infinity.
474 ///
475 /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
476 /// a NaN, and as Rust doesn't guarantee that the bit patterns of NaNs are
477 /// conserved over arithmetic operations, the result of `is_sign_negative` on
478 /// a NaN might produce an unexpected or non-portable result. See the [specification
479 /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == -1.0`
480 /// if you need fully portable behavior (will return `false` for all NaNs).
481 ///
482 /// ```
483 /// #![feature(f128)]
484 ///
485 /// let f = 7.0_f128;
486 /// let g = -7.0_f128;
487 ///
488 /// assert!(!f.is_sign_negative());
489 /// assert!(g.is_sign_negative());
490 /// ```
491 #[inline]
492 #[must_use]
493 #[unstable(feature = "f128", issue = "116909")]
494 pub const fn is_sign_negative(self) -> bool {
495 // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
496 // applies to zeros and NaNs as well.
497 // SAFETY: This is just transmuting to get the sign bit, it's fine.
498 (self.to_bits() & (1 << 127)) != 0
499 }
500
501 /// Returns the least number greater than `self`.
502 ///
503 /// Let `TINY` be the smallest representable positive `f128`. Then,
504 /// - if `self.is_nan()`, this returns `self`;
505 /// - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
506 /// - if `self` is `-TINY`, this returns -0.0;
507 /// - if `self` is -0.0 or +0.0, this returns `TINY`;
508 /// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
509 /// - otherwise the unique least value greater than `self` is returned.
510 ///
511 /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
512 /// is finite, `x == x.next_up().next_down()` also holds.
513 ///
514 /// ```rust
515 /// #![feature(f128)]
516 /// # // FIXME(f16_f128): remove when `eqtf2` is available
517 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
518 ///
519 /// // f128::EPSILON is the difference between 1.0 and the next number up.
520 /// assert_eq!(1.0f128.next_up(), 1.0 + f128::EPSILON);
521 /// // But not for most numbers.
522 /// assert!(0.1f128.next_up() < 0.1 + f128::EPSILON);
523 /// assert_eq!(4611686018427387904f128.next_up(), 4611686018427387904.000000000000001);
524 /// # }
525 /// ```
526 ///
527 /// This operation corresponds to IEEE-754 `nextUp`.
528 ///
529 /// [`NEG_INFINITY`]: Self::NEG_INFINITY
530 /// [`INFINITY`]: Self::INFINITY
531 /// [`MIN`]: Self::MIN
532 /// [`MAX`]: Self::MAX
533 #[inline]
534 #[doc(alias = "nextUp")]
535 #[unstable(feature = "f128", issue = "116909")]
536 pub const fn next_up(self) -> Self {
537 // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
538 // denormals to zero. This is in general unsound and unsupported, but here
539 // we do our best to still produce the correct result on such targets.
540 let bits = self.to_bits();
541 if self.is_nan() || bits == Self::INFINITY.to_bits() {
542 return self;
543 }
544
545 let abs = bits & !Self::SIGN_MASK;
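        // `abs == 0` means `self` is ±0.0, so the next value up is the smallest positive
        // subnormal. Otherwise a clear sign bit (`bits == abs`) means `self` is positive and we
        // step the bit pattern up; a set sign bit means `self` is negative and we step the bit
        // pattern down, moving the value toward zero.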
546 let next_bits = if abs == 0 {
547 Self::TINY_BITS
548 } else if bits == abs {
549 bits + 1
550 } else {
551 bits - 1
552 };
553 Self::from_bits(next_bits)
554 }
555
556 /// Returns the greatest number less than `self`.
557 ///
558 /// Let `TINY` be the smallest representable positive `f128`. Then,
559 /// - if `self.is_nan()`, this returns `self`;
560 /// - if `self` is [`INFINITY`], this returns [`MAX`];
561 /// - if `self` is `TINY`, this returns 0.0;
562 /// - if `self` is -0.0 or +0.0, this returns `-TINY`;
563 /// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
564 /// - otherwise the unique greatest value less than `self` is returned.
565 ///
566 /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
567 /// is finite, `x == x.next_down().next_up()` also holds.
568 ///
569 /// ```rust
570 /// #![feature(f128)]
571 /// # // FIXME(f16_f128): remove when `eqtf2` is available
572 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
573 ///
574 /// let x = 1.0f128;
575 /// // Clamp value into range [0, 1).
576 /// let clamped = x.clamp(0.0, 1.0f128.next_down());
577 /// assert!(clamped < 1.0);
578 /// assert_eq!(clamped.next_up(), 1.0);
579 /// # }
580 /// ```
581 ///
582 /// This operation corresponds to IEEE-754 `nextDown`.
583 ///
584 /// [`NEG_INFINITY`]: Self::NEG_INFINITY
585 /// [`INFINITY`]: Self::INFINITY
586 /// [`MIN`]: Self::MIN
587 /// [`MAX`]: Self::MAX
588 #[inline]
589 #[doc(alias = "nextDown")]
590 #[unstable(feature = "f128", issue = "116909")]
591 pub const fn next_down(self) -> Self {
592 // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
593 // denormals to zero. This is in general unsound and unsupported, but here
594 // we do our best to still produce the correct result on such targets.
595 let bits = self.to_bits();
596 if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
597 return self;
598 }
599
600 let abs = bits & !Self::SIGN_MASK;
601 let next_bits = if abs == 0 {
602 Self::NEG_TINY_BITS
603 } else if bits == abs {
604 bits - 1
605 } else {
606 bits + 1
607 };
608 Self::from_bits(next_bits)
609 }
610
611 /// Takes the reciprocal (inverse) of a number, `1/x`.
612 ///
613 /// ```
614 /// #![feature(f128)]
615 /// # // FIXME(f16_f128): remove when `eqtf2` is available
616 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
617 ///
618 /// let x = 2.0_f128;
619 /// let abs_difference = (x.recip() - (1.0 / x)).abs();
620 ///
621 /// assert!(abs_difference <= f128::EPSILON);
622 /// # }
623 /// ```
624 #[inline]
625 #[unstable(feature = "f128", issue = "116909")]
626 #[must_use = "this returns the result of the operation, without modifying the original"]
627 pub const fn recip(self) -> Self {
628 1.0 / self
629 }
630
631 /// Converts radians to degrees.
632 ///
633 /// # Unspecified precision
634 ///
635 /// The precision of this function is non-deterministic. This means it varies by platform,
636 /// Rust version, and can even differ within the same execution from one invocation to the next.
637 ///
638 /// # Examples
639 ///
640 /// ```
641 /// #![feature(f128)]
642 /// # // FIXME(f16_f128): remove when `eqtf2` is available
643 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
644 ///
645 /// let angle = std::f128::consts::PI;
646 ///
647 /// let abs_difference = (angle.to_degrees() - 180.0).abs();
648 /// assert!(abs_difference <= f128::EPSILON);
649 /// # }
650 /// ```
651 #[inline]
652 #[unstable(feature = "f128", issue = "116909")]
653 #[must_use = "this returns the result of the operation, without modifying the original"]
654 pub const fn to_degrees(self) -> Self {
655 // The division here is correctly rounded with respect to the true value of 180/π.
656 // Although `consts::PI` is itself a rounded approximation of the irrational π, the double
657 // rounding happens to produce the correct result for `f128`.
658 const PIS_IN_180: f128 = 180.0 / consts::PI;
659 self * PIS_IN_180
660 }
661
662 /// Converts degrees to radians.
663 ///
664 /// # Unspecified precision
665 ///
666 /// The precision of this function is non-deterministic. This means it varies by platform,
667 /// Rust version, and can even differ within the same execution from one invocation to the next.
668 ///
669 /// # Examples
670 ///
671 /// ```
672 /// #![feature(f128)]
673 /// # // FIXME(f16_f128): remove when `eqtf2` is available
674 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
675 ///
676 /// let angle = 180.0f128;
677 ///
678 /// let abs_difference = (angle.to_radians() - std::f128::consts::PI).abs();
679 ///
680 /// assert!(abs_difference <= 1e-30);
681 /// # }
682 /// ```
683 #[inline]
684 #[unstable(feature = "f128", issue = "116909")]
685 #[must_use = "this returns the result of the operation, without modifying the original"]
686 pub const fn to_radians(self) -> f128 {
687 // Use a literal to avoid double rounding: `consts::PI` is already rounded,
688 // and dividing it by 180 would round again.
689 const RADS_PER_DEG: f128 =
690 0.0174532925199432957692369076848861271344287188854172545609719_f128;
691 self * RADS_PER_DEG
692 }
693
694 /// Returns the maximum of the two numbers, ignoring NaN.
695 ///
696 /// If one of the arguments is NaN, then the other argument is returned.
697 /// This follows the IEEE 754-2008 semantics for maxNum, except for handling of signaling NaNs;
698 /// this function handles all NaNs the same way and avoids maxNum's problems with associativity.
699 /// This also matches the behavior of libm’s fmax. In particular, if the inputs compare equal
700 /// (such as for the case of `+0.0` and `-0.0`), either input may be returned non-deterministically.
701 ///
702 /// ```
703 /// #![feature(f128)]
704 /// # // Using aarch64 because `reliable_f128_math` is needed
705 /// # #[cfg(all(target_arch = "aarch64", target_os = "linux"))] {
706 ///
707 /// let x = 1.0f128;
708 /// let y = 2.0f128;
709 ///
710 /// assert_eq!(x.max(y), y);
711 /// # }
712 /// ```
713 #[inline]
714 #[unstable(feature = "f128", issue = "116909")]
715 #[rustc_const_unstable(feature = "f128", issue = "116909")]
716 #[must_use = "this returns the result of the comparison, without modifying either input"]
717 pub const fn max(self, other: f128) -> f128 {
718 intrinsics::maxnumf128(self, other)
719 }
720
721 /// Returns the minimum of the two numbers, ignoring NaN.
722 ///
723 /// If one of the arguments is NaN, then the other argument is returned.
724 /// This follows the IEEE 754-2008 semantics for minNum, except for handling of signaling NaNs;
725 /// this function handles all NaNs the same way and avoids minNum's problems with associativity.
726 /// This also matches the behavior of libm’s fmin. In particular, if the inputs compare equal
727 /// (such as for the case of `+0.0` and `-0.0`), either input may be returned non-deterministically.
728 ///
729 /// ```
730 /// #![feature(f128)]
731 /// # // Using aarch64 because `reliable_f128_math` is needed
732 /// # #[cfg(all(target_arch = "aarch64", target_os = "linux"))] {
733 ///
734 /// let x = 1.0f128;
735 /// let y = 2.0f128;
736 ///
737 /// assert_eq!(x.min(y), x);
738 /// # }
739 /// ```
740 #[inline]
741 #[unstable(feature = "f128", issue = "116909")]
742 #[rustc_const_unstable(feature = "f128", issue = "116909")]
743 #[must_use = "this returns the result of the comparison, without modifying either input"]
744 pub const fn min(self, other: f128) -> f128 {
745 intrinsics::minnumf128(self, other)
746 }
747
748 /// Returns the maximum of the two numbers, propagating NaN.
749 ///
750 /// This returns NaN when *either* argument is NaN, as opposed to
751 /// [`f128::max`] which only returns NaN when *both* arguments are NaN.
752 ///
753 /// ```
754 /// #![feature(f128)]
755 /// #![feature(float_minimum_maximum)]
756 /// # // Using aarch64 because `reliable_f128_math` is needed
757 /// # #[cfg(all(target_arch = "aarch64", target_os = "linux"))] {
758 ///
759 /// let x = 1.0f128;
760 /// let y = 2.0f128;
761 ///
762 /// assert_eq!(x.maximum(y), y);
763 /// assert!(x.maximum(f128::NAN).is_nan());
764 /// # }
765 /// ```
766 ///
767 /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the greater
768 /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0.
769 /// Note that this follows the semantics specified in IEEE 754-2019.
770 ///
771 /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN
772 /// operand is conserved; see the [specification of NaN bit patterns](f32#nan-bit-patterns) for more info.
773 #[inline]
774 #[unstable(feature = "f128", issue = "116909")]
775 // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
776 #[must_use = "this returns the result of the comparison, without modifying either input"]
777 pub const fn maximum(self, other: f128) -> f128 {
778 intrinsics::maximumf128(self, other)
779 }
780
781 /// Returns the minimum of the two numbers, propagating NaN.
782 ///
783 /// This returns NaN when *either* argument is NaN, as opposed to
784 /// [`f128::min`] which only returns NaN when *both* arguments are NaN.
785 ///
786 /// ```
787 /// #![feature(f128)]
788 /// #![feature(float_minimum_maximum)]
789 /// # // Using aarch64 because `reliable_f128_math` is needed
790 /// # #[cfg(all(target_arch = "aarch64", target_os = "linux"))] {
791 ///
792 /// let x = 1.0f128;
793 /// let y = 2.0f128;
794 ///
795 /// assert_eq!(x.minimum(y), x);
796 /// assert!(x.minimum(f128::NAN).is_nan());
797 /// # }
798 /// ```
799 ///
800 /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the lesser
801 /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0.
802 /// Note that this follows the semantics specified in IEEE 754-2019.
803 ///
804 /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN
805 /// operand is conserved; see the [specification of NaN bit patterns](f32#nan-bit-patterns) for more info.
806 #[inline]
807 #[unstable(feature = "f128", issue = "116909")]
808 // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
809 #[must_use = "this returns the result of the comparison, without modifying either input"]
810 pub const fn minimum(self, other: f128) -> f128 {
811 intrinsics::minimumf128(self, other)
812 }
813
814 /// Calculates the midpoint (average) between `self` and `other`.
815 ///
816 /// This returns NaN when *either* argument is NaN or if a combination of
817 /// +inf and -inf is provided as arguments.
818 ///
819 /// # Examples
820 ///
821 /// ```
822 /// #![feature(f128)]
823 /// # // Using aarch64 because `reliable_f128_math` is needed
824 /// # #[cfg(all(target_arch = "aarch64", target_os = "linux"))] {
825 ///
826 /// assert_eq!(1f128.midpoint(4.0), 2.5);
827 /// assert_eq!((-5.5f128).midpoint(8.0), 1.25);
828 /// # }
829 /// ```
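    ///
    /// A sketch of the NaN and opposite-infinity behavior described above (gated the same way
    /// as the example above):
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(all(target_arch = "aarch64", target_os = "linux"))] {
    ///
    /// assert!(f128::NAN.midpoint(1.0).is_nan());
    /// assert!(f128::INFINITY.midpoint(f128::NEG_INFINITY).is_nan());
    /// # }
    /// ```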
830 #[inline]
831 #[doc(alias = "average")]
832 #[unstable(feature = "f128", issue = "116909")]
833 #[rustc_const_unstable(feature = "f128", issue = "116909")]
834 pub const fn midpoint(self, other: f128) -> f128 {
835 const LO: f128 = f128::MIN_POSITIVE * 2.;
836 const HI: f128 = f128::MAX / 2.;
837
838 let (a, b) = (self, other);
839 let abs_a = a.abs();
840 let abs_b = b.abs();
841
842 if abs_a <= HI && abs_b <= HI {
843 // Overflow is impossible
844 (a + b) / 2.
845 } else if abs_a < LO {
846 // Not safe to halve `a` (would underflow)
847 a + (b / 2.)
848 } else if abs_b < LO {
849 // Not safe to halve `b` (would underflow)
850 (a / 2.) + b
851 } else {
852 // Safe to halve `a` and `b`
853 (a / 2.) + (b / 2.)
854 }
855 }
856
857 /// Rounds toward zero and converts to any primitive integer type,
858 /// assuming that the value is finite and fits in that type.
859 ///
860 /// ```
861 /// #![feature(f128)]
862 /// # // FIXME(f16_f128): remove when `float*itf` is available
863 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
864 ///
865 /// let value = 4.6_f128;
866 /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
867 /// assert_eq!(rounded, 4);
868 ///
869 /// let value = -128.9_f128;
870 /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
871 /// assert_eq!(rounded, i8::MIN);
872 /// # }
873 /// ```
874 ///
875 /// # Safety
876 ///
877 /// The value must:
878 ///
879 /// * Not be `NaN`
880 /// * Not be infinite
881 /// * Be representable in the return type `Int`, after truncating off its fractional part
882 #[inline]
883 #[unstable(feature = "f128", issue = "116909")]
884 #[must_use = "this returns the result of the operation, without modifying the original"]
885 pub unsafe fn to_int_unchecked<Int>(self) -> Int
886 where
887 Self: FloatToInt<Int>,
888 {
889 // SAFETY: the caller must uphold the safety contract for
890 // `FloatToInt::to_int_unchecked`.
891 unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
892 }
893
894 /// Raw transmutation to `u128`.
895 ///
896 /// This is currently identical to `transmute::<f128, u128>(self)` on all platforms.
897 ///
898 /// See [`from_bits`](Self::from_bits) for some discussion of the
899 /// portability of this operation (there are almost no issues).
900 ///
901 /// Note that this function is distinct from `as` casting, which attempts to
902 /// preserve the *numeric* value, and not the bitwise value.
903 ///
904 /// ```
905 /// #![feature(f128)]
906 ///
907 /// # // FIXME(f16_f128): enable this once const casting works
908 /// # // assert_ne!((1f128).to_bits(), 1f128 as u128); // to_bits() is not casting!
909 /// assert_eq!((12.5f128).to_bits(), 0x40029000000000000000000000000000);
910 /// ```
911 #[inline]
912 #[unstable(feature = "f128", issue = "116909")]
913 #[must_use = "this returns the result of the operation, without modifying the original"]
914 #[allow(unnecessary_transmutes)]
915 pub const fn to_bits(self) -> u128 {
916 // SAFETY: `u128` is a plain old datatype so we can always transmute to it.
917 unsafe { mem::transmute(self) }
918 }
919
920 /// Raw transmutation from `u128`.
921 ///
922 /// This is currently identical to `transmute::<u128, f128>(v)` on all platforms.
923 /// It turns out this is incredibly portable, for two reasons:
924 ///
925 /// * Floats and Ints have the same endianness on all supported platforms.
926 /// * IEEE 754 very precisely specifies the bit layout of floats.
927 ///
928 /// However there is one caveat: prior to the 2008 version of IEEE 754, how
929 /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
930 /// (notably x86 and ARM) picked the interpretation that was ultimately
931 /// standardized in 2008, but some didn't (notably MIPS). As a result, all
932 /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
933 ///
934 /// Rather than trying to preserve signaling-ness cross-platform, this
935 /// implementation favors preserving the exact bits. This means that
936 /// any payloads encoded in NaNs will be preserved even if the result of
937 /// this method is sent over the network from an x86 machine to a MIPS one.
938 ///
939 /// If the results of this method are only manipulated by the same
940 /// architecture that produced them, then there is no portability concern.
941 ///
942 /// If the input isn't NaN, then there is no portability concern.
943 ///
944 /// If you don't care about signalingness (very likely), then there is no
945 /// portability concern.
946 ///
947 /// Note that this function is distinct from `as` casting, which attempts to
948 /// preserve the *numeric* value, and not the bitwise value.
949 ///
950 /// ```
951 /// #![feature(f128)]
952 /// # // FIXME(f16_f128): remove when `eqtf2` is available
953 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
954 ///
955 /// let v = f128::from_bits(0x40029000000000000000000000000000);
956 /// assert_eq!(v, 12.5);
957 /// # }
958 /// ```
959 #[inline]
960 #[must_use]
961 #[unstable(feature = "f128", issue = "116909")]
962 #[allow(unnecessary_transmutes)]
963 pub const fn from_bits(v: u128) -> Self {
964 // It turns out the safety issues with sNaN were overblown! Hooray!
965 // SAFETY: `u128` is a plain old datatype so we can always transmute from it.
966 unsafe { mem::transmute(v) }
967 }
968
969 /// Returns the memory representation of this floating point number as a byte array in
970 /// big-endian (network) byte order.
971 ///
972 /// See [`from_bits`](Self::from_bits) for some discussion of the
973 /// portability of this operation (there are almost no issues).
974 ///
975 /// # Examples
976 ///
977 /// ```
978 /// #![feature(f128)]
979 ///
980 /// let bytes = 12.5f128.to_be_bytes();
981 /// assert_eq!(
982 /// bytes,
983 /// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
984 /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
985 /// );
986 /// ```
987 #[inline]
988 #[unstable(feature = "f128", issue = "116909")]
989 #[must_use = "this returns the result of the operation, without modifying the original"]
990 pub const fn to_be_bytes(self) -> [u8; 16] {
991 self.to_bits().to_be_bytes()
992 }
993
994 /// Returns the memory representation of this floating point number as a byte array in
995 /// little-endian byte order.
996 ///
997 /// See [`from_bits`](Self::from_bits) for some discussion of the
998 /// portability of this operation (there are almost no issues).
999 ///
1000 /// # Examples
1001 ///
1002 /// ```
1003 /// #![feature(f128)]
1004 ///
1005 /// let bytes = 12.5f128.to_le_bytes();
1006 /// assert_eq!(
1007 /// bytes,
1008 /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1009 /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40]
1010 /// );
1011 /// ```
1012 #[inline]
1013 #[unstable(feature = "f128", issue = "116909")]
1014 #[must_use = "this returns the result of the operation, without modifying the original"]
1015 pub const fn to_le_bytes(self) -> [u8; 16] {
1016 self.to_bits().to_le_bytes()
1017 }
1018
1019 /// Returns the memory representation of this floating point number as a byte array in
1020 /// native byte order.
1021 ///
1022 /// As the target platform's native endianness is used, portable code
1023 /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
1024 ///
1025 /// [`to_be_bytes`]: f128::to_be_bytes
1026 /// [`to_le_bytes`]: f128::to_le_bytes
1027 ///
1028 /// See [`from_bits`](Self::from_bits) for some discussion of the
1029 /// portability of this operation (there are almost no issues).
1030 ///
1031 /// # Examples
1032 ///
1033 /// ```
1034 /// #![feature(f128)]
1035 ///
1036 /// let bytes = 12.5f128.to_ne_bytes();
1037 /// assert_eq!(
1038 /// bytes,
1039 /// if cfg!(target_endian = "big") {
1040 /// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
1041 /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
1042 /// } else {
1043 /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1044 /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40]
1045 /// }
1046 /// );
1047 /// ```
1048 #[inline]
1049 #[unstable(feature = "f128", issue = "116909")]
1050 #[must_use = "this returns the result of the operation, without modifying the original"]
1051 pub const fn to_ne_bytes(self) -> [u8; 16] {
1052 self.to_bits().to_ne_bytes()
1053 }
1054
1055 /// Creates a floating point value from its representation as a byte array in big endian.
1056 ///
1057 /// See [`from_bits`](Self::from_bits) for some discussion of the
1058 /// portability of this operation (there are almost no issues).
1059 ///
1060 /// # Examples
1061 ///
1062 /// ```
1063 /// #![feature(f128)]
1064 /// # // FIXME(f16_f128): remove when `eqtf2` is available
1065 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1066 ///
1067 /// let value = f128::from_be_bytes(
1068 /// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
1069 /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
1070 /// );
1071 /// assert_eq!(value, 12.5);
1072 /// # }
1073 /// ```
1074 #[inline]
1075 #[must_use]
1076 #[unstable(feature = "f128", issue = "116909")]
1077 pub const fn from_be_bytes(bytes: [u8; 16]) -> Self {
1078 Self::from_bits(u128::from_be_bytes(bytes))
1079 }
1080
1081 /// Creates a floating point value from its representation as a byte array in little endian.
1082 ///
1083 /// See [`from_bits`](Self::from_bits) for some discussion of the
1084 /// portability of this operation (there are almost no issues).
1085 ///
1086 /// # Examples
1087 ///
1088 /// ```
1089 /// #![feature(f128)]
1090 /// # // FIXME(f16_f128): remove when `eqtf2` is available
1091 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1092 ///
1093 /// let value = f128::from_le_bytes(
1094 /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1095 /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40]
1096 /// );
1097 /// assert_eq!(value, 12.5);
1098 /// # }
1099 /// ```
1100 #[inline]
1101 #[must_use]
1102 #[unstable(feature = "f128", issue = "116909")]
1103 pub const fn from_le_bytes(bytes: [u8; 16]) -> Self {
1104 Self::from_bits(u128::from_le_bytes(bytes))
1105 }
1106
1107 /// Creates a floating point value from its representation as a byte array in native endian.
1108 ///
1109 /// As the target platform's native endianness is used, portable code
1110 /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
1111 /// appropriate instead.
1112 ///
1113 /// [`from_be_bytes`]: f128::from_be_bytes
1114 /// [`from_le_bytes`]: f128::from_le_bytes
1115 ///
1116 /// See [`from_bits`](Self::from_bits) for some discussion of the
1117 /// portability of this operation (there are almost no issues).
1118 ///
1119 /// # Examples
1120 ///
1121 /// ```
1122 /// #![feature(f128)]
1123 /// # // FIXME(f16_f128): remove when `eqtf2` is available
1124 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1125 ///
1126 /// let value = f128::from_ne_bytes(if cfg!(target_endian = "big") {
1127 /// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
1128 /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
1129 /// } else {
1130 /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1131 /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40]
1132 /// });
1133 /// assert_eq!(value, 12.5);
1134 /// # }
1135 /// ```
1136 #[inline]
1137 #[must_use]
1138 #[unstable(feature = "f128", issue = "116909")]
1139 pub const fn from_ne_bytes(bytes: [u8; 16]) -> Self {
1140 Self::from_bits(u128::from_ne_bytes(bytes))
1141 }
1142
1143 /// Returns the ordering between `self` and `other`.
1144 ///
1145 /// Unlike the standard partial comparison between floating point numbers,
1146 /// this comparison always produces an ordering in accordance with
1147 /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
1148 /// floating point standard. The values are ordered in the following sequence:
1149 ///
1150 /// - negative quiet NaN
1151 /// - negative signaling NaN
1152 /// - negative infinity
1153 /// - negative numbers
1154 /// - negative subnormal numbers
1155 /// - negative zero
1156 /// - positive zero
1157 /// - positive subnormal numbers
1158 /// - positive numbers
1159 /// - positive infinity
1160 /// - positive signaling NaN
1161 /// - positive quiet NaN.
1162 ///
1163 /// The ordering established by this function does not always agree with the
1164 /// [`PartialOrd`] and [`PartialEq`] implementations of `f128`. For example,
1165 /// they consider negative and positive zero equal, while `total_cmp`
1166 /// doesn't.
1167 ///
1168 /// The interpretation of the signaling NaN bit follows the definition in
1169 /// the IEEE 754 standard, which may not match the interpretation by some of
1170 /// the older, non-conformant (e.g. MIPS) hardware implementations.
1171 ///
1172 /// # Example
1173 ///
1174 /// ```
1175 /// #![feature(f128)]
1176 ///
1177 /// struct GoodBoy {
1178 /// name: &'static str,
1179 /// weight: f128,
1180 /// }
1181 ///
1182 /// let mut bois = vec![
1183 /// GoodBoy { name: "Pucci", weight: 0.1 },
1184 /// GoodBoy { name: "Woofer", weight: 99.0 },
1185 /// GoodBoy { name: "Yapper", weight: 10.0 },
1186 /// GoodBoy { name: "Chonk", weight: f128::INFINITY },
1187 /// GoodBoy { name: "Abs. Unit", weight: f128::NAN },
1188 /// GoodBoy { name: "Floaty", weight: -5.0 },
1189 /// ];
1190 ///
1191 /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
1192 ///
1193 /// // `f128::NAN` could be positive or negative, which will affect the sort order.
1194 /// if f128::NAN.is_sign_negative() {
1195 /// bois.into_iter().map(|b| b.weight)
1196 /// .zip([f128::NAN, -5.0, 0.1, 10.0, 99.0, f128::INFINITY].iter())
1197 /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1198 /// } else {
1199 /// bois.into_iter().map(|b| b.weight)
1200 /// .zip([-5.0, 0.1, 10.0, 99.0, f128::INFINITY, f128::NAN].iter())
1201 /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1202 /// }
1203 /// ```
1204 #[inline]
1205 #[must_use]
1206 #[unstable(feature = "f128", issue = "116909")]
1207 pub fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
1208 let mut left = self.to_bits() as i128;
1209 let mut right = other.to_bits() as i128;
1210
1211 // In case of negatives, flip all the bits except the sign
1212 // to achieve a similar layout as two's complement integers
1213 //
1214 // Why does this work? IEEE 754 floats consist of three fields:
1215 // Sign bit, exponent and mantissa. The set of exponent and mantissa
1216 // fields as a whole have the property that their bitwise order is
1217 // equal to the numeric magnitude where the magnitude is defined.
1218 // The magnitude is not normally defined on NaN values, but
1219 // IEEE 754 totalOrder defines the NaN values also to follow the
1220 // bitwise order. This leads to order explained in the doc comment.
1221 // However, the representation of magnitude is the same for negative
1222 // and positive numbers – only the sign bit is different.
1223 // To easily compare the floats as signed integers, we need to
1224 // flip the exponent and mantissa bits in case of negative numbers.
1225 // We effectively convert the numbers to "two's complement" form.
1226 //
1227 // To do the flipping, we construct a mask and XOR against it.
1228 // We branchlessly calculate an "all-ones except for the sign bit"
1229 // mask from negative-signed values: right shifting sign-extends
1230 // the integer, so we "fill" the mask with sign bits, and then
1231 // convert to unsigned to push one more zero bit.
1232 // On positive values, the mask is all zeros, so it's a no-op.
1233 left ^= (((left >> 127) as u128) >> 1) as i128;
1234 right ^= (((right >> 127) as u128) >> 1) as i128;
1235
1236 left.cmp(&right)
1237 }
1238
1239 /// Restrict a value to a certain interval unless it is NaN.
1240 ///
1241 /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
1242 /// less than `min`. Otherwise this returns `self`.
1243 ///
1244 /// Note that this function returns NaN if the initial value was NaN as
1245 /// well.
1246 ///
1247 /// # Panics
1248 ///
1249 /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
1250 ///
1251 /// # Examples
1252 ///
1253 /// ```
1254 /// #![feature(f128)]
1255 /// # // FIXME(f16_f128): remove when `{eq,gt,unord}tf` are available
1256 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1257 ///
1258 /// assert!((-3.0f128).clamp(-2.0, 1.0) == -2.0);
1259 /// assert!((0.0f128).clamp(-2.0, 1.0) == 0.0);
1260 /// assert!((2.0f128).clamp(-2.0, 1.0) == 1.0);
1261 /// assert!((f128::NAN).clamp(-2.0, 1.0).is_nan());
1262 /// # }
1263 /// ```
1264 #[inline]
1265 #[unstable(feature = "f128", issue = "116909")]
1266 #[must_use = "method returns a new number and does not mutate the original value"]
1267 pub const fn clamp(mut self, min: f128, max: f128) -> f128 {
1268 const_assert!(
1269 min <= max,
1270 "min > max, or either was NaN",
1271 "min > max, or either was NaN. min = {min:?}, max = {max:?}",
1272 min: f128,
1273 max: f128,
1274 );
1275
1276 if self < min {
1277 self = min;
1278 }
1279 if self > max {
1280 self = max;
1281 }
1282 self
1283 }
1284
1285 /// Computes the absolute value of `self`.
1286 ///
1287 /// This function always returns the precise result.
1288 ///
1289 /// # Examples
1290 ///
1291 /// ```
1292 /// #![feature(f128)]
1293 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1294 ///
1295 /// let x = 3.5_f128;
1296 /// let y = -3.5_f128;
1297 ///
1298 /// assert_eq!(x.abs(), x);
1299 /// assert_eq!(y.abs(), -y);
1300 ///
1301 /// assert!(f128::NAN.abs().is_nan());
1302 /// # }
1303 /// ```
1304 #[inline]
1305 #[unstable(feature = "f128", issue = "116909")]
1306 #[rustc_const_unstable(feature = "f128", issue = "116909")]
1307 #[must_use = "method returns a new number and does not mutate the original value"]
1308 pub const fn abs(self) -> Self {
1309 // FIXME(f16_f128): replace with `intrinsics::fabsf128` when available
1310 // We don't do this now because LLVM has lowering bugs for f128 math.
1311 Self::from_bits(self.to_bits() & !(1 << 127))
1312 }
1313
1314 /// Returns a number that represents the sign of `self`.
1315 ///
1316 /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
1317 /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
1318 /// - NaN if the number is NaN
1319 ///
1320 /// # Examples
1321 ///
1322 /// ```
1323 /// #![feature(f128)]
1324 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1325 ///
1326 /// let f = 3.5_f128;
1327 ///
1328 /// assert_eq!(f.signum(), 1.0);
1329 /// assert_eq!(f128::NEG_INFINITY.signum(), -1.0);
1330 ///
1331 /// assert!(f128::NAN.signum().is_nan());
1332 /// # }
1333 /// ```
1334 #[inline]
1335 #[unstable(feature = "f128", issue = "116909")]
1336 #[rustc_const_unstable(feature = "f128", issue = "116909")]
1337 #[must_use = "method returns a new number and does not mutate the original value"]
1338 pub const fn signum(self) -> f128 {
1339 if self.is_nan() { Self::NAN } else { 1.0_f128.copysign(self) }
1340 }
1341
1342 /// Returns a number composed of the magnitude of `self` and the sign of
1343 /// `sign`.
1344 ///
1345 /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise equal to `-self`.
1346 /// If `self` is a NaN, then a NaN with the same payload as `self` and the sign bit of `sign` is
1347 /// returned.
1348 ///
1349 /// If `sign` is a NaN, then this operation will still carry over its sign into the result. Note
1350 /// that IEEE 754 doesn't assign any meaning to the sign bit in case of a NaN, and as Rust
1351 /// doesn't guarantee that the bit patterns of NaNs are conserved over arithmetic operations, the
1352 /// result of `copysign` with `sign` being a NaN might produce an unexpected or non-portable
1353 /// result. See the [specification of NaN bit patterns](primitive@f32#nan-bit-patterns) for more
1354 /// info.
1355 ///
1356 /// # Examples
1357 ///
1358 /// ```
1359 /// #![feature(f128)]
1360 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1361 ///
1362 /// let f = 3.5_f128;
1363 ///
1364 /// assert_eq!(f.copysign(0.42), 3.5_f128);
1365 /// assert_eq!(f.copysign(-0.42), -3.5_f128);
1366 /// assert_eq!((-f).copysign(0.42), 3.5_f128);
1367 /// assert_eq!((-f).copysign(-0.42), -3.5_f128);
1368 ///
1369 /// assert!(f128::NAN.copysign(1.0).is_nan());
1370 /// # }
1371 /// ```
1372 #[inline]
1373 #[unstable(feature = "f128", issue = "116909")]
1374 #[rustc_const_unstable(feature = "f128", issue = "116909")]
1375 #[must_use = "method returns a new number and does not mutate the original value"]
1376 pub const fn copysign(self, sign: f128) -> f128 {
1377 // SAFETY: this is actually a safe intrinsic
1378 unsafe { intrinsics::copysignf128(self, sign) }
1379 }
1380
1381 /// Float addition that allows optimizations based on algebraic rules.
1382 ///
1383 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
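    ///
    /// A minimal sketch (algebraic operations have unspecified precision, so the result is only
    /// compared approximately; gated like the other `f128` doctests in this file):
    ///
    /// ```
    /// #![feature(f128)]
    /// #![feature(float_algebraic)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let sum = 1.0_f128.algebraic_add(2.0);
    /// assert!((sum - 3.0).abs() <= f128::EPSILON);
    /// # }
    /// ```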
1384 #[must_use = "method returns a new number and does not mutate the original value"]
1385 #[unstable(feature = "float_algebraic", issue = "136469")]
1386 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1387 #[inline]
1388 pub const fn algebraic_add(self, rhs: f128) -> f128 {
1389 intrinsics::fadd_algebraic(self, rhs)
1390 }
1391
1392 /// Float subtraction that allows optimizations based on algebraic rules.
1393 ///
1394 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1395 #[must_use = "method returns a new number and does not mutate the original value"]
1396 #[unstable(feature = "float_algebraic", issue = "136469")]
1397 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1398 #[inline]
1399 pub const fn algebraic_sub(self, rhs: f128) -> f128 {
1400 intrinsics::fsub_algebraic(self, rhs)
1401 }
1402
1403 /// Float multiplication that allows optimizations based on algebraic rules.
1404 ///
1405 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1406 #[must_use = "method returns a new number and does not mutate the original value"]
1407 #[unstable(feature = "float_algebraic", issue = "136469")]
1408 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1409 #[inline]
1410 pub const fn algebraic_mul(self, rhs: f128) -> f128 {
1411 intrinsics::fmul_algebraic(self, rhs)
1412 }
1413
1414 /// Float division that allows optimizations based on algebraic rules.
1415 ///
1416 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1417 #[must_use = "method returns a new number and does not mutate the original value"]
1418 #[unstable(feature = "float_algebraic", issue = "136469")]
1419 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1420 #[inline]
1421 pub const fn algebraic_div(self, rhs: f128) -> f128 {
1422 intrinsics::fdiv_algebraic(self, rhs)
1423 }
1424
1425 /// Float remainder that allows optimizations based on algebraic rules.
1426 ///
1427 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1428 #[must_use = "method returns a new number and does not mutate the original value"]
1429 #[unstable(feature = "float_algebraic", issue = "136469")]
1430 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1431 #[inline]
1432 pub const fn algebraic_rem(self, rhs: f128) -> f128 {
1433 intrinsics::frem_algebraic(self, rhs)
1434 }
1435}
1436
1437// Functions in this module fall into `core_float_math`
1438 // FIXME(f16_f128): all doctests must be gated to platforms that have `long double` == `_Float128`
1439// due to https://github.com/llvm/llvm-project/issues/44744. aarch64 linux matches this.
1440// #[unstable(feature = "core_float_math", issue = "137578")]
1441#[cfg(not(test))]
1442#[doc(test(attr(feature(cfg_target_has_reliable_f16_f128), expect(internal_features))))]
1443impl f128 {
    /// Returns the largest integer less than or equal to `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let f = 3.7_f128;
    /// let g = 3.0_f128;
    /// let h = -3.7_f128;
    ///
    /// assert_eq!(f.floor(), 3.0);
    /// assert_eq!(g.floor(), 3.0);
    /// assert_eq!(h.floor(), -4.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f128", issue = "116909")]
    #[rustc_const_unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn floor(self) -> f128 {
        // SAFETY: intrinsic with no preconditions
        unsafe { intrinsics::floorf128(self) }
    }

    /// Returns the smallest integer greater than or equal to `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let f = 3.01_f128;
    /// let g = 4.0_f128;
    ///
    /// assert_eq!(f.ceil(), 4.0);
    /// assert_eq!(g.ceil(), 4.0);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "ceiling")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f128", issue = "116909")]
    #[rustc_const_unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn ceil(self) -> f128 {
        // SAFETY: intrinsic with no preconditions
        unsafe { intrinsics::ceilf128(self) }
    }

    /// Returns the nearest integer to `self`. If a value is half-way between two
    /// integers, it is rounded away from `0.0`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let f = 3.3_f128;
    /// let g = -3.3_f128;
    /// let h = -3.7_f128;
    /// let i = 3.5_f128;
    /// let j = 4.5_f128;
    ///
    /// assert_eq!(f.round(), 3.0);
    /// assert_eq!(g.round(), -3.0);
    /// assert_eq!(h.round(), -4.0);
    /// assert_eq!(i.round(), 4.0);
    /// assert_eq!(j.round(), 5.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f128", issue = "116909")]
    #[rustc_const_unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn round(self) -> f128 {
        // SAFETY: intrinsic with no preconditions
        unsafe { intrinsics::roundf128(self) }
    }

    /// Returns the nearest integer to `self`. Rounds half-way cases to the number
    /// with an even least significant digit.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let f = 3.3_f128;
    /// let g = -3.3_f128;
    /// let h = 3.5_f128;
    /// let i = 4.5_f128;
    ///
    /// assert_eq!(f.round_ties_even(), 3.0);
    /// assert_eq!(g.round_ties_even(), -3.0);
    /// assert_eq!(h.round_ties_even(), 4.0);
    /// assert_eq!(i.round_ties_even(), 4.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f128", issue = "116909")]
    #[rustc_const_unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn round_ties_even(self) -> f128 {
        intrinsics::round_ties_even_f128(self)
    }

    /// Returns the integer part of `self`.
    /// This means that non-integer numbers are always truncated towards zero.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let f = 3.7_f128;
    /// let g = 3.0_f128;
    /// let h = -3.7_f128;
    ///
    /// assert_eq!(f.trunc(), 3.0);
    /// assert_eq!(g.trunc(), 3.0);
    /// assert_eq!(h.trunc(), -3.0);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "truncate")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f128", issue = "116909")]
    #[rustc_const_unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn trunc(self) -> f128 {
        // SAFETY: intrinsic with no preconditions
        unsafe { intrinsics::truncf128(self) }
    }

    /// Returns the fractional part of `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let x = 3.6_f128;
    /// let y = -3.6_f128;
    /// let abs_difference_x = (x.fract() - 0.6).abs();
    /// let abs_difference_y = (y.fract() - (-0.6)).abs();
    ///
    /// assert!(abs_difference_x <= f128::EPSILON);
    /// assert!(abs_difference_y <= f128::EPSILON);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f128", issue = "116909")]
    #[rustc_const_unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn fract(self) -> f128 {
        self - self.trunc()
    }

    /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if
    /// the target architecture has a dedicated `fma` CPU instruction. However,
    /// this is not always true, and it depends heavily on designing algorithms
    /// with specific target hardware in mind.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result. It is specified by IEEE 754 as
    /// `fusedMultiplyAdd` and guaranteed not to change.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let m = 10.0_f128;
    /// let x = 4.0_f128;
    /// let b = 60.0_f128;
    ///
    /// assert_eq!(m.mul_add(x, b), 100.0);
    /// assert_eq!(m * x + b, 100.0);
    ///
    /// let one_plus_eps = 1.0_f128 + f128::EPSILON;
    /// let one_minus_eps = 1.0_f128 - f128::EPSILON;
    /// let minus_one = -1.0_f128;
    ///
    /// // The exact result (1 + eps) * (1 - eps) = 1 - eps * eps.
    /// assert_eq!(one_plus_eps.mul_add(one_minus_eps, minus_one), -f128::EPSILON * f128::EPSILON);
    /// // Different rounding with the non-fused multiply and add.
    /// assert_eq!(one_plus_eps * one_minus_eps + minus_one, 0.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[doc(alias = "fmaf128", alias = "fusedMultiplyAdd")]
    #[unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn mul_add(self, a: f128, b: f128) -> f128 {
        // SAFETY: intrinsic with no preconditions
        unsafe { intrinsics::fmaf128(self, a, b) }
    }

    /// Calculates Euclidean division, the matching method for `rem_euclid`.
    ///
    /// This computes the integer `n` such that
    /// `self = n * rhs + self.rem_euclid(rhs)`.
    /// In other words, the result is `self / rhs` rounded to the integer `n`
    /// such that `self >= n * rhs`.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let a: f128 = 7.0;
    /// let b = 4.0;
    /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0
    /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0
    /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0
    /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn div_euclid(self, rhs: f128) -> f128 {
        let q = (self / rhs).trunc();
        if self % rhs < 0.0 {
            return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
        }
        q
    }

    /// Calculates the least nonnegative remainder of `self (mod rhs)`.
    ///
    /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in
    /// most cases. However, due to floating-point round-off error, it can
    /// result in `r == rhs.abs()`, violating the mathematical definition, if
    /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`.
    /// This result is not an element of the function's codomain, but it is the
    /// closest representable floating-point number, so it fulfills the property
    /// `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)` approximately.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let a: f128 = 7.0;
    /// let b = 4.0;
    /// assert_eq!(a.rem_euclid(b), 3.0);
    /// assert_eq!((-a).rem_euclid(b), 1.0);
    /// assert_eq!(a.rem_euclid(-b), 3.0);
    /// assert_eq!((-a).rem_euclid(-b), 1.0);
    /// // limitation due to round-off error
    /// assert!((-f128::EPSILON).rem_euclid(3.0) != 0.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[doc(alias = "modulo", alias = "mod")]
    #[unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn rem_euclid(self, rhs: f128) -> f128 {
        let r = self % rhs;
        if r < 0.0 { r + rhs.abs() } else { r }
    }

    /// Raises a number to an integer power.
    ///
    /// Using this function is generally faster than using `powf`.
    /// It might have a different sequence of rounding operations than `powf`,
    /// so the results are not guaranteed to agree.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform
    /// and Rust version, and it can even differ within the same execution from one invocation
    /// to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let x = 2.0_f128;
    /// let abs_difference = (x.powi(2) - (x * x)).abs();
    /// assert!(abs_difference <= f128::EPSILON);
    ///
    /// assert_eq!(f128::powi(f128::NAN, 0), 1.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn powi(self, n: i32) -> f128 {
        // SAFETY: intrinsic with no preconditions
        unsafe { intrinsics::powif128(self, n) }
    }

    /// Returns the square root of a number.
    ///
    /// Returns NaN if `self` is a negative number other than `-0.0`.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result. It is specified by IEEE 754 as `squareRoot`
    /// and guaranteed not to change.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f128)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f128_math)] {
    ///
    /// let positive = 4.0_f128;
    /// let negative = -4.0_f128;
    /// let negative_zero = -0.0_f128;
    ///
    /// assert_eq!(positive.sqrt(), 2.0);
    /// assert!(negative.sqrt().is_nan());
    /// assert!(negative_zero.sqrt() == negative_zero);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "squareRoot")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f128", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn sqrt(self) -> f128 {
        // SAFETY: intrinsic with no preconditions
        unsafe { intrinsics::sqrtf128(self) }
    }
}