// Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! The binomial distribution.

use Rng;
use distributions::{Distribution, Bernoulli, Cauchy};
use distributions::utils::log_gamma;

/// The binomial distribution `Binomial(n, p)`.
///
/// This distribution has probability mass function
/// `f(k) = n!/(k! (n-k)!) p^k (1-p)^(n-k)` for `0 <= k <= n`.
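///
/// The mean of the distribution is `n p` and its variance is `n p (1-p)`.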
///
/// # Example
///
/// ```
/// use rand::distributions::{Binomial, Distribution};
///
/// let bin = Binomial::new(20, 0.3);
/// let v = bin.sample(&mut rand::thread_rng());
/// println!("{} is from a binomial distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
pub struct Binomial {
    /// Number of trials.
    n: u64,
    /// Probability of success.
    p: f64,
}

impl Binomial {
    /// Construct a new `Binomial` with the given shape parameters `n` (number
    /// of trials) and `p` (probability of success).
    ///
    /// Panics if `p < 0` or `p > 1`.
    pub fn new(n: u64, p: f64) -> Binomial {
        assert!(p >= 0.0, "Binomial::new called with p < 0");
        assert!(p <= 1.0, "Binomial::new called with p > 1");
        Binomial { n, p }
    }
}

impl Distribution<u64> for Binomial {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
        // Handle these values directly.
        if self.p == 0.0 {
            return 0;
        } else if self.p == 1.0 {
            return self.n;
        }
        
        // For low n it is faster to sample directly by summing Bernoulli
        // trials. For both methods performance is independent of p; on an
        // Intel Haswell CPU the direct method appears to be faster for
        // approximately n < 300.
        if self.n < 300 {
            let mut result = 0;
            let d = Bernoulli::new(self.p);
            for _ in 0 .. self.n {
                result += rng.sample(d) as u32;
            }
            return result as u64;
        }
        
        // The binomial distribution is symmetric with respect to p -> 1-p and
        // k -> n-k. Switch to p <= 0.5, which keeps the expected value low,
        // and invert the result at the end if we did.
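        // For example, a draw from Binomial(n, 0.8) can be obtained as
        // n minus a draw from Binomial(n, 0.2).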
        let p = if self.p <= 0.5 {
            self.p
        } else {
            1.0 - self.p
        };

        // prepare some cached values
        let float_n = self.n as f64;
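        // log_gamma(x + 1.0) = ln(x!) for integer x, so the next line is ln(n!).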
        let ln_fact_n = log_gamma(float_n + 1.0);
        let pc = 1.0 - p;
        let log_p = p.ln();
        let log_pc = pc.ln();
        let expected = self.n as f64 * p;
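        // The scale of the comparison distribution below: sqrt(2) times the
        // binomial standard deviation sqrt(n p (1-p)).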
        let sq = (expected * (2.0 * pc)).sqrt();

        let mut lresult;

        // we use the Cauchy distribution as the comparison distribution
        // f(x) ~ 1/(1+x^2)
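        // Rejection sampling: draw a candidate from a Cauchy centered on the
        // expected value and scaled by `sq`, then accept it with probability
        // proportional to the ratio of the binomial mass at the candidate to
        // the Cauchy density there.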
        let cauchy = Cauchy::new(0.0, 1.0);
        loop {
            let mut comp_dev: f64;
            loop {
                // draw from the Cauchy distribution
                comp_dev = rng.sample(cauchy);
                // shift the peak of the comparison distribution
                lresult = expected + sq * comp_dev;
                // repeat the drawing until we are in the range of possible values
                if lresult >= 0.0 && lresult < float_n + 1.0 {
                    break;
                }
            }

            // the result should be discrete
            lresult = lresult.floor();

            let log_binomial_dist = ln_fact_n - log_gamma(lresult+1.0) -
                log_gamma(float_n - lresult + 1.0) + lresult*log_p + (float_n - lresult)*log_pc;
            // This is the binomial probability divided by the comparison
            // probability. Generate a uniform random value; if it is larger
            // than this coefficient, treat the candidate as falling outside
            // the distribution and repeat.
            let comparison_coeff = (log_binomial_dist.exp() * sq) * (1.2 * (1.0 + comp_dev*comp_dev));

            if comparison_coeff >= rng.gen() {
                break;
            }
        }

        // if p was flipped above (original p > 0.5), invert the result back
        if p != self.p {
            self.n - lresult as u64
        } else {
            lresult as u64
        }
    }
}

#[cfg(test)]
mod test {
    use Rng;
    use distributions::Distribution;
    use super::Binomial;

    fn test_binomial_mean_and_variance<R: Rng>(n: u64, p: f64, rng: &mut R) {
        let binomial = Binomial::new(n, p);

        let expected_mean = n as f64 * p;
        let expected_variance = n as f64 * p * (1.0 - p);

        let mut results = [0.0; 1000];
        for i in results.iter_mut() { *i = binomial.sample(rng) as f64; }

        let mean = results.iter().sum::<f64>() / results.len() as f64;
        assert!((mean as f64 - expected_mean).abs() < expected_mean / 50.0);

        let variance =
            results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>()
            / results.len() as f64;
        assert!((variance - expected_variance).abs() < expected_variance / 10.0);
    }

    #[test]
    fn test_binomial() {
        let mut rng = ::test::rng(351);
        test_binomial_mean_and_variance(150, 0.1, &mut rng);
        test_binomial_mean_and_variance(70, 0.6, &mut rng);
        test_binomial_mean_and_variance(40, 0.5, &mut rng);
        test_binomial_mean_and_variance(20, 0.7, &mut rng);
        test_binomial_mean_and_variance(20, 0.5, &mut rng);
    }

    #[test]
    fn test_binomial_end_points() {
        let mut rng = ::test::rng(352);
        assert_eq!(rng.sample(Binomial::new(20, 0.0)), 0);
        assert_eq!(rng.sample(Binomial::new(20, 1.0)), 20);
    }
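
    // A sketch added here (not part of the original suite): none of the cases
    // above reach n >= 300, so the Cauchy rejection branch of `sample` is not
    // exercised. This runs that branch with loose, deterministic-safe checks;
    // the seed 353 is an arbitrary assumption.
    #[test]
    fn test_binomial_large_n_in_range() {
        let mut rng = ::test::rng(353);
        let binomial = Binomial::new(1000, 0.3);
        let mut sum = 0u64;
        for _ in 0..100 {
            let v: u64 = binomial.sample(&mut rng);
            // samples must lie in [0, n]
            assert!(v <= 1000);
            sum += v;
        }
        // Very loose sanity check on the mean (expected value is 300).
        let mean = sum as f64 / 100.0;
        assert!(mean > 200.0 && mean < 400.0);
    }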

    #[test]
    #[should_panic]
    fn test_binomial_invalid_p_neg() {
        Binomial::new(20, -10.0);
    }
}