rand/distr/
uniform_int.rs

1// Copyright 2018-2020 Developers of the Rand project.
2// Copyright 2017 The Rust Project Developers.
3//
4// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
5// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
7// option. This file may not be copied, modified, or distributed
8// except according to those terms.
9
10//! `UniformInt` implementation
11
12use super::{Error, SampleBorrow, SampleUniform, UniformSampler};
13use crate::distr::utils::WideningMultiply;
14#[cfg(feature = "simd_support")]
15use crate::distr::{Distribution, StandardUniform};
16use crate::Rng;
17
18#[cfg(feature = "simd_support")]
19use core::simd::prelude::*;
20#[cfg(feature = "simd_support")]
21use core::simd::{LaneCount, SupportedLaneCount};
22
23#[cfg(feature = "serde")]
24use serde::{Deserialize, Serialize};
25
/// The back-end implementing [`UniformSampler`] for integer types.
///
/// Unless you are implementing [`UniformSampler`] for your own type, this type
/// should not be used directly, use [`Uniform`] instead.
///
/// # Implementation notes
///
/// For simplicity, we use the same generic struct `UniformInt<X>` for all
/// integer types `X`. This gives us only one field type, `X`; to store unsigned
/// values of this size, we make use of the fact that these conversions are no-ops.
///
/// For a closed range, the number of possible numbers we should generate is
/// `range = (high - low + 1)`. To avoid bias, we must ensure that the size of
/// our sample space, `zone`, is a multiple of `range`; other values must be
/// rejected (by replacing with a new random sample).
///
/// As a special case, we use `range = 0` to represent the full range of the
/// result type (i.e. for `new_inclusive($ty::MIN, $ty::MAX)`).
///
/// The optimum `zone` is the largest product of `range` which fits in our
/// (unsigned) target type. We calculate this by calculating how many numbers we
/// must reject: `reject = (MAX + 1) % range = (MAX - range + 1) % range`. Any (large)
/// product of `range` will suffice, thus in `sample_single` we multiply by a
/// power of 2 via bit-shifting (faster but may cause more rejections).
///
/// The smallest integer PRNGs generate is `u32`. For 8- and 16-bit outputs we
/// use `u32` for our `zone` and samples (because it's not slower and because
/// it reduces the chance of having to reject a sample). In this case we cannot
/// store `zone` in the target type since it is too large, however we know
/// `ints_to_reject < range <= $uty::MAX`.
///
/// An alternative to using a modulus is widening multiply: After a widening
/// multiply by `range`, the result is in the high word. Then comparing the low
/// word against `zone` makes sure our distribution is uniform.
///
/// # Bias
///
/// Unless the `unbiased` feature flag is used, outputs may have a small bias.
/// In the worst case, bias affects 1 in `2^n` samples where n is
/// 56 (`i8` and `u8`), 48 (`i16` and `u16`), 96 (`i32` and `u32`), 64 (`i64`
/// and `u64`), 128 (`i128` and `u128`).
///
/// [`Uniform`]: super::Uniform
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct UniformInt<X> {
    /// Lowest value of the (inclusive) sampled range.
    pub(super) low: X,
    /// Number of values in the range; `0` encodes the full range of the type
    /// (see implementation notes above). Logically an unsigned value.
    pub(super) range: X,
    /// Rejection threshold for `sample`; logically a value of the sampling
    /// type, effectively `2.pow(sample_ty_bits) % range` — i.e. the count of
    /// low-word values that must be rejected to keep the output unbiased.
    thresh: X,
}
76
// Implements `SampleUniform` and `UniformSampler` for one integer type.
//
// - `$ty`: the target type (possibly signed)
// - `$uty`: the unsigned type with the same bit width as `$ty`
// - `$sample_ty`: the unsigned type used for sampling arithmetic; at least
//   32 bits wide (see the `UniformInt` implementation notes)
macro_rules! uniform_int_impl {
    ($ty:ty, $uty:ty, $sample_ty:ident) => {
        impl SampleUniform for $ty {
            type Sampler = UniformInt<$ty>;
        }

        impl UniformSampler for UniformInt<$ty> {
            // We play free and fast with unsigned vs signed here
            // (when $ty is signed), but that's fine, since the
            // contract of this macro is for $ty and $uty to be
            // "bit-equal", so casting between them is a no-op.

            type X = $ty;

            /// Construct a sampler for the half-open range `[low, high)`.
            ///
            /// Returns `Err(Error::EmptyRange)` unless `low < high`.
            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low < high) {
                    return Err(Error::EmptyRange);
                }
                // `low < high` guarantees `high - 1` cannot underflow.
                UniformSampler::new_inclusive(low, high - 1)
            }

            /// Construct a sampler for the closed range `[low, high]`.
            ///
            /// Returns `Err(Error::EmptyRange)` unless `low <= high`.
            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low <= high) {
                    return Err(Error::EmptyRange);
                }

                // Number of values in the range; wraps to 0 for the full
                // range of the type (the special case documented above).
                let range = high.wrapping_sub(low).wrapping_add(1) as $uty;
                let thresh = if range > 0 {
                    let range = $sample_ty::from(range);
                    // In unsigned arithmetic `-range % range` equals
                    // `2.pow(sample_ty_bits) % range`: the number of low
                    // words `sample` must reject to stay unbiased.
                    (range.wrapping_neg() % range)
                } else {
                    // Full range: no rejection needed.
                    0
                };

                Ok(UniformInt {
                    low,
                    range: range as $ty,           // type: $uty
                    thresh: thresh as $uty as $ty, // type: $sample_ty
                })
            }

            /// Sample from distribution, Lemire's method, unbiased
            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                let range = self.range as $uty as $sample_ty;
                if range == 0 {
                    // Special case: the whole type is the range.
                    return rng.random();
                }

                // A widening multiply maps the random word to [0, range) in
                // its high word; low words below `thresh` correspond to
                // over-represented outputs and are rejected (resampled).
                let thresh = self.thresh as $uty as $sample_ty;
                let hi = loop {
                    let (hi, lo) = rng.random::<$sample_ty>().wmul(range);
                    if lo >= thresh {
                        break hi;
                    }
                };
                self.low.wrapping_add(hi as $ty)
            }

            /// Sample from `[low, high)` without constructing a distribution.
            ///
            /// Returns `Err(Error::EmptyRange)` unless `low < high`.
            #[inline]
            fn sample_single<R: Rng + ?Sized, B1, B2>(
                low_b: B1,
                high_b: B2,
                rng: &mut R,
            ) -> Result<Self::X, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low < high) {
                    return Err(Error::EmptyRange);
                }
                Self::sample_single_inclusive(low, high - 1, rng)
            }

            /// Sample single value, Canon's method, biased
            ///
            /// In the worst case, bias affects 1 in `2^n` samples where n is
            /// 56 (`i8`), 48 (`i16`), 96 (`i32`), 64 (`i64`), 128 (`i128`).
            #[cfg(not(feature = "unbiased"))]
            #[inline]
            fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(
                low_b: B1,
                high_b: B2,
                rng: &mut R,
            ) -> Result<Self::X, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low <= high) {
                    return Err(Error::EmptyRange);
                }
                let range = high.wrapping_sub(low).wrapping_add(1) as $uty as $sample_ty;
                if range == 0 {
                    // Range is MAX+1 (unrepresentable), so we need a special case
                    return Ok(rng.random());
                }

                // generate a sample using a sensible integer type
                let (mut result, lo_order) = rng.random::<$sample_ty>().wmul(range);

                // if the sample is biased... (only low words above
                // `-range mod 2^bits` can be)
                if lo_order > range.wrapping_neg() {
                    // ...generate a new sample to reduce bias...
                    let (new_hi_order, _) = (rng.random::<$sample_ty>()).wmul(range as $sample_ty);
                    // ... incrementing result on overflow
                    let is_overflow = lo_order.checked_add(new_hi_order as $sample_ty).is_none();
                    result += is_overflow as $sample_ty;
                }

                Ok(low.wrapping_add(result as $ty))
            }

            /// Sample single value, Canon's method, unbiased
            #[cfg(feature = "unbiased")]
            #[inline]
            fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(
                low_b: B1,
                high_b: B2,
                rng: &mut R,
            ) -> Result<Self::X, Error>
            where
                B1: SampleBorrow<$ty> + Sized,
                B2: SampleBorrow<$ty> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low <= high) {
                    return Err(Error::EmptyRange);
                }
                let range = high.wrapping_sub(low).wrapping_add(1) as $uty as $sample_ty;
                if range == 0 {
                    // Range is MAX+1 (unrepresentable), so we need a special case
                    return Ok(rng.random());
                }

                let (mut result, mut lo) = rng.random::<$sample_ty>().wmul(range);

                // In contrast to the biased sampler, we use a loop:
                while lo > range.wrapping_neg() {
                    let (new_hi, new_lo) = (rng.random::<$sample_ty>()).wmul(range);
                    match lo.checked_add(new_hi) {
                        Some(x) if x < $sample_ty::MAX => {
                            // Anything less than MAX: last term is 0
                            break;
                        }
                        None => {
                            // Overflow: last term is 1
                            result += 1;
                            break;
                        }
                        _ => {
                            // Unlikely case: must check next sample
                            lo = new_lo;
                            continue;
                        }
                    }
                }

                Ok(low.wrapping_add(result as $ty))
            }
        }
    };
}
262
// Implement for all primitive integer types. Arguments: target type,
// bit-equal unsigned type, and the unsigned type used internally for
// sampling (at least 32 bits, per the `UniformInt` implementation notes).
uniform_int_impl! { i8, u8, u32 }
uniform_int_impl! { i16, u16, u32 }
uniform_int_impl! { i32, u32, u32 }
uniform_int_impl! { i64, u64, u64 }
uniform_int_impl! { i128, u128, u128 }
uniform_int_impl! { u8, u8, u32 }
uniform_int_impl! { u16, u16, u32 }
uniform_int_impl! { u32, u32, u32 }
uniform_int_impl! { u64, u64, u64 }
uniform_int_impl! { u128, u128, u128 }
273
// Implements `SampleUniform` and `UniformSampler` for one SIMD element type.
//
// - `$ty`: the element type (possibly signed)
// - `$unsigned`: the bit-equal unsigned element type used for arithmetic
#[cfg(feature = "simd_support")]
macro_rules! uniform_simd_int_impl {
    ($ty:ident, $unsigned:ident) => {
        // The "pick the largest zone that can fit in an `u32`" optimization
        // is less useful here. Multiple lanes complicate things, we don't
        // know the PRNG's minimal output size, and casting to a larger vector
        // is generally a bad idea for SIMD performance. The user can still
        // implement it manually.

        #[cfg(feature = "simd_support")]
        impl<const LANES: usize> SampleUniform for Simd<$ty, LANES>
        where
            LaneCount<LANES>: SupportedLaneCount,
            Simd<$unsigned, LANES>:
                WideningMultiply<Output = (Simd<$unsigned, LANES>, Simd<$unsigned, LANES>)>,
            StandardUniform: Distribution<Simd<$unsigned, LANES>>,
        {
            type Sampler = UniformInt<Simd<$ty, LANES>>;
        }

        #[cfg(feature = "simd_support")]
        impl<const LANES: usize> UniformSampler for UniformInt<Simd<$ty, LANES>>
        where
            LaneCount<LANES>: SupportedLaneCount,
            Simd<$unsigned, LANES>:
                WideningMultiply<Output = (Simd<$unsigned, LANES>, Simd<$unsigned, LANES>)>,
            StandardUniform: Distribution<Simd<$unsigned, LANES>>,
        {
            type X = Simd<$ty, LANES>;

            /// Construct a sampler for the lane-wise half-open range
            /// `[low, high)`; every lane must satisfy `low < high`.
            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
                where B1: SampleBorrow<Self::X> + Sized,
                      B2: SampleBorrow<Self::X> + Sized
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low.simd_lt(high).all()) {
                    return Err(Error::EmptyRange);
                }
                UniformSampler::new_inclusive(low, high - Simd::splat(1))
            }

            /// Construct a sampler for the lane-wise closed range
            /// `[low, high]`; every lane must satisfy `low <= high`.
            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
                where B1: SampleBorrow<Self::X> + Sized,
                      B2: SampleBorrow<Self::X> + Sized
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low.simd_le(high).all()) {
                    return Err(Error::EmptyRange);
                }

                // NOTE: all `Simd` operations are inherently wrapping,
                //       see https://doc.rust-lang.org/std/simd/struct.Simd.html
                let range: Simd<$unsigned, LANES> = ((high - low) + Simd::splat(1)).cast();

                // We must avoid divide-by-zero by using 0 % 1 == 0.
                // (A zero lane encodes "full range", as in the scalar case.)
                let not_full_range = range.simd_gt(Simd::splat(0));
                let modulo = not_full_range.select(range, Simd::splat(1));
                let ints_to_reject = range.wrapping_neg() % modulo;

                Ok(UniformInt {
                    low,
                    // These are really $unsigned values, but store as $ty:
                    range: range.cast(),
                    thresh: ints_to_reject.cast(),
                })
            }

            /// Sample lane-wise via widening multiply with per-lane rejection.
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                let range: Simd<$unsigned, LANES> = self.range.cast();
                let thresh: Simd<$unsigned, LANES> = self.thresh.cast();

                // This might seem very slow, generating a whole new
                // SIMD vector for every sample rejection. For most uses
                // though, the chance of rejection is small and provides good
                // general performance. With multiple lanes, that chance is
                // multiplied. To mitigate this, we replace only the lanes of
                // the vector which fail, iteratively reducing the chance of
                // rejection. The replacement method does however add a little
                // overhead. Benchmarking or calculating probabilities might
                // reveal contexts where this replacement method is slower.
                let mut v: Simd<$unsigned, LANES> = rng.random();
                loop {
                    let (hi, lo) = v.wmul(range);
                    // A lane is accepted when its low word reaches `thresh`.
                    let mask = lo.simd_ge(thresh);
                    if mask.all() {
                        let hi: Simd<$ty, LANES> = hi.cast();
                        // wrapping addition
                        let result = self.low + hi;
                        // `select` here compiles to a blend operation
                        // When `range.eq(0).none()` the compare and blend
                        // operations are avoided.
                        // Full-range lanes (range == 0) use the raw sample.
                        let v: Simd<$ty, LANES> = v.cast();
                        return range.simd_gt(Simd::splat(0)).select(result, v);
                    }
                    // Replace only the failing lanes
                    v = mask.select(v, rng.random());
                }
            }
        }
    };

    // bulk implementation
    ($(($unsigned:ident, $signed:ident)),+) => {
        $(
            uniform_simd_int_impl!($unsigned, $unsigned);
            uniform_simd_int_impl!($signed, $unsigned);
        )+
    };
}
389
// Implement for 8- to 64-bit SIMD integer types; each pair is
// (unsigned, signed), with the unsigned type used for internal arithmetic.
#[cfg(feature = "simd_support")]
uniform_simd_int_impl! { (u8, i8), (u16, i16), (u32, i32), (u64, i64) }
392
/// The back-end implementing [`UniformSampler`] for `usize`.
///
/// # Implementation notes
///
/// Sampling a `usize` value is usually used in relation to the length of an
/// array or other memory structure, thus it is reasonable to assume that the
/// vast majority of use-cases will have a maximum size under [`u32::MAX`].
/// In part to optimise for this use-case, but mostly to ensure that results
/// are portable across 32-bit and 64-bit architectures (as far as is possible),
/// this implementation will use 32-bit sampling when possible.
#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(all(feature = "serde"), derive(Serialize))]
// To be able to deserialize on 32-bit we need to replace this with a custom
// implementation of the Deserialize trait, to be able to:
// - panic when `mode64` is `true` on 32-bit,
// - assign the default value to `mode64` when it's missing on 64-bit,
// - panic when the `usize` fields are greater than `u32::MAX` on 32-bit.
#[cfg_attr(
    all(feature = "serde", target_pointer_width = "64"),
    derive(Deserialize)
)]
pub struct UniformUsize {
    /// The lowest possible value.
    low: usize,
    /// The number of possible values. `0` has a special meaning: all.
    range: usize,
    /// Threshold used when sampling to obtain a uniform distribution
    /// (count of rejected low words, as in `UniformInt::thresh`).
    thresh: usize,
    /// Whether the largest possible value is greater than `u32::MAX`.
    /// When `false`, sampling uses 32-bit arithmetic for cross-platform
    /// reproducibility.
    #[cfg(target_pointer_width = "64")]
    // Handle missing field when deserializing on 64-bit an object serialized
    // on 32-bit. Can be removed when switching to a custom deserializer.
    #[cfg_attr(feature = "serde", serde(default))]
    mode64: bool,
}
429
// `usize` gets a dedicated sampler (rather than `UniformInt<usize>`) so that
// results can be kept portable across 32-bit and 64-bit targets.
#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))]
impl SampleUniform for usize {
    type Sampler = UniformUsize;
}
434
#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))]
impl UniformSampler for UniformUsize {
    type X = usize;

    /// Construct a sampler for the half-open range `[low, high)`.
    ///
    /// Returns `Err(Error::EmptyRange)` unless `low < high`.
    #[inline] // if the range is constant, this helps LLVM to do the
              // calculations at compile-time.
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low < high) {
            return Err(Error::EmptyRange);
        }

        // `low < high` guarantees `high - 1` cannot underflow.
        UniformSampler::new_inclusive(low, high - 1)
    }

    /// Construct a sampler for the closed range `[low, high]`.
    ///
    /// Uses 32-bit sampling parameters whenever `high <= u32::MAX` so that
    /// results agree across 32-bit and 64-bit targets.
    #[inline] // if the range is constant, this helps LLVM to do the
              // calculations at compile-time.
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low <= high) {
            return Err(Error::EmptyRange);
        }

        // 64-bit sampling is only needed when the upper bound exceeds
        // `u32::MAX`; on 32-bit targets it can never be needed.
        #[cfg(target_pointer_width = "64")]
        let mode64 = high > (u32::MAX as usize);
        #[cfg(target_pointer_width = "32")]
        let mode64 = false;

        let (range, thresh);
        if cfg!(target_pointer_width = "64") && !mode64 {
            // 32-bit mode on a 64-bit target: compute parameters in `u32`
            // so they match those a 32-bit target would compute.
            let range32 = (high as u32).wrapping_sub(low as u32).wrapping_add(1);
            range = range32 as usize;
            thresh = if range32 > 0 {
                // `-range % range` == `2^32 % range`: rejected low words.
                (range32.wrapping_neg() % range32) as usize
            } else {
                0
            };
        } else {
            // Native-width mode; `range` wraps to 0 for the full range.
            range = high.wrapping_sub(low).wrapping_add(1);
            thresh = if range > 0 {
                range.wrapping_neg() % range
            } else {
                0
            };
        }

        Ok(UniformUsize {
            low,
            range,
            thresh,
            #[cfg(target_pointer_width = "64")]
            mode64,
        })
    }

    /// Sample using a widening multiply with rejection (as in
    /// `UniformInt::sample`), in 32-bit arithmetic unless `mode64` was set
    /// at construction.
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
        #[cfg(target_pointer_width = "32")]
        let mode32 = true;
        #[cfg(target_pointer_width = "64")]
        let mode32 = !self.mode64;

        if mode32 {
            let range = self.range as u32;
            if range == 0 {
                // Full 32-bit range: use the raw sample.
                return rng.random::<u32>() as usize;
            }

            let thresh = self.thresh as u32;
            // Reject samples whose low word falls below `thresh`.
            let hi = loop {
                let (hi, lo) = rng.random::<u32>().wmul(range);
                if lo >= thresh {
                    break hi;
                }
            };
            self.low.wrapping_add(hi as usize)
        } else {
            let range = self.range as u64;
            if range == 0 {
                // Full 64-bit range: use the raw sample.
                return rng.random::<u64>() as usize;
            }

            let thresh = self.thresh as u64;
            let hi = loop {
                let (hi, lo) = rng.random::<u64>().wmul(range);
                if lo >= thresh {
                    break hi;
                }
            };
            self.low.wrapping_add(hi as usize)
        }
    }

    /// Sample from `[low, high)`: delegates to `UniformInt<u32>` when the
    /// bound fits in `u32`, otherwise to `UniformInt<u64>` (64-bit only).
    #[inline]
    fn sample_single<R: Rng + ?Sized, B1, B2>(
        low_b: B1,
        high_b: B2,
        rng: &mut R,
    ) -> Result<Self::X, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low < high) {
            return Err(Error::EmptyRange);
        }

        if cfg!(target_pointer_width = "64") && high > (u32::MAX as usize) {
            return UniformInt::<u64>::sample_single(low as u64, high as u64, rng)
                .map(|x| x as usize);
        }

        UniformInt::<u32>::sample_single(low as u32, high as u32, rng).map(|x| x as usize)
    }

    /// Sample from `[low, high]`; same delegation strategy as `sample_single`.
    #[inline]
    fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(
        low_b: B1,
        high_b: B2,
        rng: &mut R,
    ) -> Result<Self::X, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low <= high) {
            return Err(Error::EmptyRange);
        }

        if cfg!(target_pointer_width = "64") && high > (u32::MAX as usize) {
            return UniformInt::<u64>::sample_single_inclusive(low as u64, high as u64, rng)
                .map(|x| x as usize);
        }

        UniformInt::<u32>::sample_single_inclusive(low as u32, high as u32, rng).map(|x| x as usize)
    }
}
586
587#[cfg(test)]
588mod tests {
589    use super::*;
590    use crate::distr::{Distribution, Uniform};
591    use core::fmt::Debug;
592    use core::ops::Add;
593
594    #[test]
595    fn test_uniform_bad_limits_equal_int() {
596        assert_eq!(Uniform::new(10, 10), Err(Error::EmptyRange));
597    }
598
599    #[test]
600    fn test_uniform_good_limits_equal_int() {
601        let mut rng = crate::test::rng(804);
602        let dist = Uniform::new_inclusive(10, 10).unwrap();
603        for _ in 0..20 {
604            assert_eq!(rng.sample(dist), 10);
605        }
606    }
607
608    #[test]
609    fn test_uniform_bad_limits_flipped_int() {
610        assert_eq!(Uniform::new(10, 5), Err(Error::EmptyRange));
611    }
612
    /// Exhaustive bounds check: for several ranges of every supported integer
    /// type (and, with `simd_support`, SIMD vector type), verify that all six
    /// construction/sampling paths only produce values inside the range.
    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_integers() {
        let mut rng = crate::test::rng(251);
        // `t!` takes the type, a list of (low, high) pairs, and `<=`/`<`
        // predicates (closures, so the SIMD forms can use lane-wise compares).
        macro_rules! t {
            ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{
                for &(low, high) in $v.iter() {
                    // Exclusive-range distribution.
                    let my_uniform = Uniform::new(low, high).unwrap();
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $lt(v, high));
                    }

                    // Inclusive-range distribution.
                    let my_uniform = Uniform::new_inclusive(low, high).unwrap();
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $le(v, high));
                    }

                    // Borrowed-argument constructors must behave identically.
                    let my_uniform = Uniform::new(&low, high).unwrap();
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $lt(v, high));
                    }

                    let my_uniform = Uniform::new_inclusive(&low, &high).unwrap();
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $le(v, high));
                    }

                    // One-shot sampling paths (no distribution object).
                    for _ in 0..1000 {
                        let v = <$ty as SampleUniform>::Sampler::sample_single(low, high, &mut rng).unwrap();
                        assert!($le(low, v) && $lt(v, high));
                    }

                    for _ in 0..1000 {
                        let v = <$ty as SampleUniform>::Sampler::sample_single_inclusive(low, high, &mut rng).unwrap();
                        assert!($le(low, v) && $le(v, high));
                    }
                }
            }};

            // scalar bulk
            ($($ty:ident),*) => {{
                $(t!(
                    $ty,
                    [(0, 10), (10, 127), ($ty::MIN, $ty::MAX)],
                    |x, y| x <= y,
                    |x, y| x < y
                );)*
            }};

            // simd bulk
            ($($ty:ident),* => $scalar:ident) => {{
                $(t!(
                    $ty,
                    [
                        ($ty::splat(0), $ty::splat(10)),
                        ($ty::splat(10), $ty::splat(127)),
                        ($ty::splat($scalar::MIN), $ty::splat($scalar::MAX)),
                    ],
                    |x: $ty, y| x.simd_le(y).all(),
                    |x: $ty, y| x.simd_lt(y).all()
                );)*
            }};
        }
        t!(i8, i16, i32, i64, i128, u8, u16, u32, u64, usize, u128);

        #[cfg(feature = "simd_support")]
        {
            t!(u8x4, u8x8, u8x16, u8x32, u8x64 => u8);
            t!(i8x4, i8x8, i8x16, i8x32, i8x64 => i8);
            t!(u16x2, u16x4, u16x8, u16x16, u16x32 => u16);
            t!(i16x2, i16x4, i16x8, i16x16, i16x32 => i16);
            t!(u32x2, u32x4, u32x8, u32x16 => u32);
            t!(i32x2, i32x4, i32x8, i32x16 => i32);
            t!(u64x2, u64x4, u64x8 => u64);
            t!(i64x2, i64x4, i64x8 => i64);
        }
    }
694
695    #[test]
696    fn test_uniform_from_std_range() {
697        let r = Uniform::try_from(2u32..7).unwrap();
698        assert_eq!(r.0.low, 2);
699        assert_eq!(r.0.range, 5);
700    }
701
702    #[test]
703    fn test_uniform_from_std_range_bad_limits() {
704        #![allow(clippy::reversed_empty_ranges)]
705        assert!(Uniform::try_from(100..10).is_err());
706        assert!(Uniform::try_from(100..100).is_err());
707    }
708
709    #[test]
710    fn test_uniform_from_std_range_inclusive() {
711        let r = Uniform::try_from(2u32..=6).unwrap();
712        assert_eq!(r.0.low, 2);
713        assert_eq!(r.0.range, 5);
714    }
715
716    #[test]
717    fn test_uniform_from_std_range_inclusive_bad_limits() {
718        #![allow(clippy::reversed_empty_ranges)]
719        assert!(Uniform::try_from(100..=10).is_err());
720        assert!(Uniform::try_from(100..=99).is_err());
721    }
722
    /// Pin the exact output sequences of the samplers for a fixed RNG seed.
    /// These expected values are part of the crate's value-stability
    /// guarantee: any change to the sampling algorithms that alters them is
    /// a breaking change. Do not update the tables without intent.
    #[test]
    fn value_stability() {
        // Draws three samples via the one-shot path and three via a
        // distribution object, for both the inclusive ([lb, ub]) and
        // exclusive ([lb, ub_excl)) constructors, and compares all six
        // against `expected`. The RNG is re-seeded between the two passes,
        // so both constructions must yield identical streams.
        fn test_samples<T: SampleUniform + Copy + Debug + PartialEq + Add<T>>(
            lb: T,
            ub: T,
            ub_excl: T,
            expected: &[T],
        ) where
            Uniform<T>: Distribution<T>,
        {
            let mut rng = crate::test::rng(897);
            let mut buf = [lb; 6];

            for x in &mut buf[0..3] {
                *x = T::Sampler::sample_single_inclusive(lb, ub, &mut rng).unwrap();
            }

            let distr = Uniform::new_inclusive(lb, ub).unwrap();
            for x in &mut buf[3..6] {
                *x = rng.sample(&distr);
            }
            assert_eq!(&buf, expected);

            // Same seed, exclusive constructors: must match exactly.
            let mut rng = crate::test::rng(897);

            for x in &mut buf[0..3] {
                *x = T::Sampler::sample_single(lb, ub_excl, &mut rng).unwrap();
            }

            let distr = Uniform::new(lb, ub_excl).unwrap();
            for x in &mut buf[3..6] {
                *x = rng.sample(&distr);
            }
            assert_eq!(&buf, expected);
        }

        test_samples(-105i8, 111, 112, &[-99, -48, 107, 72, -19, 56]);
        test_samples(2i16, 1352, 1353, &[43, 361, 1325, 1109, 539, 1005]);
        test_samples(
            -313853i32,
            13513,
            13514,
            &[-303803, -226673, 6912, -45605, -183505, -70668],
        );
        test_samples(
            131521i64,
            6542165,
            6542166,
            &[1838724, 5384489, 4893692, 3712948, 3951509, 4094926],
        );
        test_samples(
            -0x8000_0000_0000_0000_0000_0000_0000_0000i128,
            -1,
            0,
            &[
                -30725222750250982319765550926688025855,
                -75088619368053423329503924805178012357,
                -64950748766625548510467638647674468829,
                -41794017901603587121582892414659436495,
                -63623852319608406524605295913876414006,
                -17404679390297612013597359206379189023,
            ],
        );
        // u8/u16/u32/usize share one expected table here: all use 32-bit
        // sampling for this sub-`u32::MAX` range, so their streams agree.
        test_samples(11u8, 218, 219, &[17, 66, 214, 181, 93, 165]);
        test_samples(11u16, 218, 219, &[17, 66, 214, 181, 93, 165]);
        test_samples(11u32, 218, 219, &[17, 66, 214, 181, 93, 165]);
        test_samples(11u64, 218, 219, &[66, 181, 165, 127, 134, 139]);
        test_samples(11u128, 218, 219, &[181, 127, 139, 167, 141, 197]);
        test_samples(11usize, 218, 219, &[17, 66, 214, 181, 93, 165]);

        #[cfg(feature = "simd_support")]
        {
            let lb = Simd::from([11u8, 0, 128, 127]);
            let ub = Simd::from([218, 254, 254, 254]);
            let ub_excl = ub + Simd::splat(1);
            test_samples(
                lb,
                ub,
                ub_excl,
                &[
                    Simd::from([13, 5, 237, 130]),
                    Simd::from([126, 186, 149, 161]),
                    Simd::from([103, 86, 234, 252]),
                    Simd::from([35, 18, 225, 231]),
                    Simd::from([106, 153, 246, 177]),
                    Simd::from([195, 168, 149, 222]),
                ],
            );
        }
    }
813
814    #[test]
815    fn test_uniform_usize_empty_range() {
816        assert_eq!(UniformUsize::new(10, 10), Err(Error::EmptyRange));
817        assert!(UniformUsize::new(10, 11).is_ok());
818
819        assert_eq!(UniformUsize::new_inclusive(10, 9), Err(Error::EmptyRange));
820        assert!(UniformUsize::new_inclusive(10, 10).is_ok());
821    }
822
    #[test]
    fn test_uniform_usize_constructors() {
        // Verify the exact internal state `new_inclusive` produces for ranges
        // on either side of the 32-bit boundary.

        // Single-value range: range == 1, no rejection threshold needed.
        assert_eq!(
            UniformUsize::new_inclusive(u32::MAX as usize, u32::MAX as usize),
            Ok(UniformUsize {
                low: u32::MAX as usize,
                range: 1,
                thresh: 0,
                #[cfg(target_pointer_width = "64")]
                mode64: false
            })
        );
        // Full 32-bit range: stored as range == 0 (the "full range" sentinel,
        // see the implementation notes at the top of this module).
        assert_eq!(
            UniformUsize::new_inclusive(0, u32::MAX as usize),
            Ok(UniformUsize {
                low: 0,
                range: 0,
                thresh: 0,
                #[cfg(target_pointer_width = "64")]
                mode64: false
            })
        );
        // One past the 32-bit boundary: 64-bit sampling mode is selected.
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            UniformUsize::new_inclusive(0, u32::MAX as usize + 1),
            Ok(UniformUsize {
                low: 0,
                range: u32::MAX as usize + 2,
                thresh: 1,
                mode64: true
            })
        );
        // Wide 64-bit range with a non-zero lower bound.
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            UniformUsize::new_inclusive(u32::MAX as usize, u64::MAX as usize),
            Ok(UniformUsize {
                low: u32::MAX as usize,
                range: u64::MAX as usize - u32::MAX as usize + 1,
                thresh: u32::MAX as usize,
                mode64: true
            })
        );
    }
866
867    // This could be run also on 32-bit when deserialization is implemented.
868    #[cfg(all(feature = "serde", target_pointer_width = "64"))]
869    #[test]
870    fn test_uniform_usize_deserialization() {
871        use serde_json;
872        let original = UniformUsize::new_inclusive(10, 100).expect("creation");
873        let serialized = serde_json::to_string(&original).expect("serialization");
874        let deserialized: UniformUsize =
875            serde_json::from_str(&serialized).expect("deserialization");
876        assert_eq!(deserialized, original);
877    }
878
879    #[cfg(all(feature = "serde", target_pointer_width = "64"))]
880    #[test]
881    fn test_uniform_usize_deserialization_from_32bit() {
882        use serde_json;
883        let serialized_on_32bit = r#"{"low":10,"range":91,"thresh":74}"#;
884        let deserialized: UniformUsize =
885            serde_json::from_str(&serialized_on_32bit).expect("deserialization");
886        assert_eq!(
887            deserialized,
888            UniformUsize::new_inclusive(10, 100).expect("creation")
889        );
890    }
891
892    #[cfg(all(feature = "serde", target_pointer_width = "64"))]
893    #[test]
894    fn test_uniform_usize_deserialization_64bit() {
895        use serde_json;
896        let original = UniformUsize::new_inclusive(1, u64::MAX as usize - 1).expect("creation");
897        assert!(original.mode64);
898        let serialized = serde_json::to_string(&original).expect("serialization");
899        let deserialized: UniformUsize =
900            serde_json::from_str(&serialized).expect("deserialization");
901        assert_eq!(deserialized, original);
902    }
903}