Add adaptation for the orbital reconnaissance scenario

2026-01-08 15:44:38 +08:00
parent 3eba1f962b
commit 10c5bb5a8a
5441 changed files with 40219 additions and 379695 deletions

View File

@@ -92,7 +92,6 @@ def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64):
Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The
default dtype value is 'd'
"""
cdef Py_ssize_t i
cdef bitgen_t *rng
cdef const char *capsule_name = "BitGenerator"
cdef np.ndarray randoms
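A minimal usage sketch of the `uniforms_ex` Cython example above once the extension has been built; the module name `extending_distributions` is an assumption here, since this diff does not show the file name, and the sketch is not part of the commit:
import numpy as np
from numpy.random import PCG64
from extending_distributions import uniforms_ex  # assumed name of the built example module
vals64 = uniforms_ex(PCG64(), 10)                    # default dtype 'd' -> float64 output
vals32 = uniforms_ex(PCG64(), 10, dtype=np.float32)  # 'f' -> float32 output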

View File

@@ -1,4 +1,4 @@
from collections.abc import Callable
from collections.abc import Callable, MutableSequence
from typing import Any, Literal, TypeAlias, TypeVar, overload
import numpy as np
@@ -68,134 +68,134 @@ class Generator:
@overload
def standard_normal( # type: ignore[misc]
self,
size: None = ...,
size: None = None,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
out: None = None,
) -> float: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: _ShapeLike = ...,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def standard_normal( # type: ignore[misc]
self,
*,
out: NDArray[float64] = ...,
out: NDArray[float64] | None = None,
) -> NDArray[float64]: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: _ShapeLike = ...,
size: _ShapeLike | None = None,
dtype: _DTypeLikeFloat32 = ...,
out: NDArray[float32] | None = ...,
out: NDArray[float32] | None = None,
) -> NDArray[float32]: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: _ShapeLike = ...,
size: _ShapeLike | None = None,
dtype: _DTypeLikeFloat64 = ...,
out: NDArray[float64] | None = ...,
out: NDArray[float64] | None = None,
) -> NDArray[float64]: ...
@overload
def permutation(self, x: int, axis: int = ...) -> NDArray[int64]: ...
def permutation(self, x: int, axis: int = 0) -> NDArray[int64]: ...
@overload
def permutation(self, x: ArrayLike, axis: int = ...) -> NDArray[Any]: ...
def permutation(self, x: ArrayLike, axis: int = 0) -> NDArray[Any]: ...
@overload
def standard_exponential( # type: ignore[misc]
self,
size: None = ...,
size: None = None,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
method: Literal["zig", "inv"] = ...,
out: None = ...,
method: Literal["zig", "inv"] = "zig",
out: None = None,
) -> float: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def standard_exponential(
self,
*,
out: NDArray[float64] = ...,
out: NDArray[float64] | None = None,
) -> NDArray[float64]: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
size: _ShapeLike | None = None,
*,
method: Literal["zig", "inv"] = ...,
out: NDArray[float64] | None = ...,
method: Literal["zig", "inv"] = "zig",
out: NDArray[float64] | None = None,
) -> NDArray[float64]: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
size: _ShapeLike | None = None,
dtype: _DTypeLikeFloat32 = ...,
method: Literal["zig", "inv"] = ...,
out: NDArray[float32] | None = ...,
method: Literal["zig", "inv"] = "zig",
out: NDArray[float32] | None = None,
) -> NDArray[float32]: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
size: _ShapeLike | None = None,
dtype: _DTypeLikeFloat64 = ...,
method: Literal["zig", "inv"] = ...,
out: NDArray[float64] | None = ...,
method: Literal["zig", "inv"] = "zig",
out: NDArray[float64] | None = None,
) -> NDArray[float64]: ...
@overload
def random( # type: ignore[misc]
self,
size: None = ...,
size: None = None,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
out: None = None,
) -> float: ...
@overload
def random(
self,
*,
out: NDArray[float64] = ...,
out: NDArray[float64] | None = None,
) -> NDArray[float64]: ...
@overload
def random(
self,
size: _ShapeLike = ...,
size: _ShapeLike | None = None,
*,
out: NDArray[float64] | None = ...,
out: NDArray[float64] | None = None,
) -> NDArray[float64]: ...
@overload
def random(
self,
size: _ShapeLike = ...,
size: _ShapeLike | None = None,
dtype: _DTypeLikeFloat32 = ...,
out: NDArray[float32] | None = ...,
out: NDArray[float32] | None = None,
) -> NDArray[float32]: ...
@overload
def random(
self,
size: _ShapeLike = ...,
size: _ShapeLike | None = None,
dtype: _DTypeLikeFloat64 = ...,
out: NDArray[float64] | None = ...,
out: NDArray[float64] | None = None,
) -> NDArray[float64]: ...
@overload
def beta(
self,
a: _FloatLike_co,
b: _FloatLike_co,
size: None = ...,
size: None = None,
) -> float: ... # type: ignore[misc]
@overload
def beta(
self,
a: _ArrayLikeFloat_co,
b: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc]
@overload
def exponential(self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...) -> NDArray[float64]: ...
def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ...
#
@overload
@@ -462,7 +462,7 @@ class Generator:
low: int,
high: int | None = None,
size: None = None,
dtype: DTypeLike = ...,
dtype: DTypeLike | None = ...,
endpoint: bool = False,
) -> Any: ...
@overload
@@ -471,7 +471,7 @@ class Generator:
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: DTypeLike = ...,
dtype: DTypeLike | None = ...,
endpoint: bool = False,
) -> NDArray[Any]: ...
@@ -481,135 +481,136 @@ class Generator:
def choice(
self,
a: int,
size: None = ...,
replace: bool = ...,
p: _ArrayLikeFloat_co | None = ...,
axis: int = ...,
shuffle: bool = ...,
size: None = None,
replace: bool = True,
p: _ArrayLikeFloat_co | None = None,
axis: int = 0,
shuffle: bool = True,
) -> int: ...
@overload
def choice(
self,
a: int,
size: _ShapeLike = ...,
replace: bool = ...,
p: _ArrayLikeFloat_co | None = ...,
axis: int = ...,
shuffle: bool = ...,
size: _ShapeLike | None = None,
replace: bool = True,
p: _ArrayLikeFloat_co | None = None,
axis: int = 0,
shuffle: bool = True,
) -> NDArray[int64]: ...
@overload
def choice(
self,
a: ArrayLike,
size: None = ...,
replace: bool = ...,
p: _ArrayLikeFloat_co | None = ...,
axis: int = ...,
shuffle: bool = ...,
size: None = None,
replace: bool = True,
p: _ArrayLikeFloat_co | None = None,
axis: int = 0,
shuffle: bool = True,
) -> Any: ...
@overload
def choice(
self,
a: ArrayLike,
size: _ShapeLike = ...,
replace: bool = ...,
p: _ArrayLikeFloat_co | None = ...,
axis: int = ...,
shuffle: bool = ...,
size: _ShapeLike | None = None,
replace: bool = True,
p: _ArrayLikeFloat_co | None = None,
axis: int = 0,
shuffle: bool = True,
) -> NDArray[Any]: ...
@overload
def uniform(
self,
low: _FloatLike_co = ...,
high: _FloatLike_co = ...,
size: None = ...,
low: _FloatLike_co = 0.0,
high: _FloatLike_co = 1.0,
size: None = None,
) -> float: ... # type: ignore[misc]
@overload
def uniform(
self,
low: _ArrayLikeFloat_co = ...,
high: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
low: _ArrayLikeFloat_co = 0.0,
high: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def normal(
self,
loc: _FloatLike_co = ...,
scale: _FloatLike_co = ...,
size: None = ...,
loc: _FloatLike_co = 0.0,
scale: _FloatLike_co = 1.0,
size: None = None,
) -> float: ... # type: ignore[misc]
@overload
def normal(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
loc: _ArrayLikeFloat_co = 0.0,
scale: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def standard_gamma( # type: ignore[misc]
self,
shape: _FloatLike_co,
size: None = ...,
size: None = None,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
out: None = None,
) -> float: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
*,
out: NDArray[float64] = ...,
out: NDArray[float64] | None = None,
) -> NDArray[float64]: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...,
size: _ShapeLike | None = None,
dtype: _DTypeLikeFloat32 = ...,
out: NDArray[float32] | None = ...,
out: NDArray[float32] | None = None,
) -> NDArray[float32]: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...,
size: _ShapeLike | None = None,
dtype: _DTypeLikeFloat64 = ...,
out: NDArray[float64] | None = ...,
out: NDArray[float64] | None = None,
) -> NDArray[float64]: ...
@overload
def gamma(
self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...
self, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def gamma(
self,
shape: _ArrayLikeFloat_co,
scale: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
scale: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def f(
self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...
self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def f(
self,
dfnum: _ArrayLikeFloat_co,
dfden: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def noncentral_f(
self,
dfnum: _FloatLike_co,
dfden: _FloatLike_co,
nonc: _FloatLike_co, size: None = ...
nonc: _FloatLike_co,
size: None = None,
) -> float: ... # type: ignore[misc]
@overload
def noncentral_f(
@@ -617,140 +618,140 @@ class Generator:
dfnum: _ArrayLikeFloat_co,
dfden: _ArrayLikeFloat_co,
nonc: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc]
@overload
def chisquare(
self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def noncentral_chisquare(
self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...
self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def noncentral_chisquare(
self,
df: _ArrayLikeFloat_co,
nonc: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc]
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: None = ...
self, df: _ArrayLikeFloat_co, size: None = None
) -> NDArray[float64]: ...
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def vonmises(
self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...
self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def vonmises(
self,
mu: _ArrayLikeFloat_co,
kappa: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc]
@overload
def pareto(
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc]
@overload
def weibull(
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
def power(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc]
@overload
def power(
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc]
@overload
def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ...
def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ...
@overload
def laplace(
self,
loc: _FloatLike_co = ...,
scale: _FloatLike_co = ...,
size: None = ...,
loc: _FloatLike_co = 0.0,
scale: _FloatLike_co = 1.0,
size: None = None,
) -> float: ... # type: ignore[misc]
@overload
def laplace(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
loc: _ArrayLikeFloat_co = 0.0,
scale: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def gumbel(
self,
loc: _FloatLike_co = ...,
scale: _FloatLike_co = ...,
size: None = ...,
loc: _FloatLike_co = 0.0,
scale: _FloatLike_co = 1.0,
size: None = None,
) -> float: ... # type: ignore[misc]
@overload
def gumbel(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
loc: _ArrayLikeFloat_co = 0.0,
scale: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def logistic(
self,
loc: _FloatLike_co = ...,
scale: _FloatLike_co = ...,
size: None = ...,
loc: _FloatLike_co = 0.0,
scale: _FloatLike_co = 1.0,
size: None = None,
) -> float: ... # type: ignore[misc]
@overload
def logistic(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
loc: _ArrayLikeFloat_co = 0.0,
scale: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def lognormal(
self,
mean: _FloatLike_co = ...,
sigma: _FloatLike_co = ...,
size: None = ...,
mean: _FloatLike_co = 0.0,
sigma: _FloatLike_co = 1.0,
size: None = None,
) -> float: ... # type: ignore[misc]
@overload
def lognormal(
self,
mean: _ArrayLikeFloat_co = ...,
sigma: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
mean: _ArrayLikeFloat_co = 0.0,
sigma: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc]
@overload
def rayleigh(
self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...
self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def wald(
self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...
self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def wald(
self,
mean: _ArrayLikeFloat_co,
scale: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def triangular(
@@ -758,7 +759,7 @@ class Generator:
left: _FloatLike_co,
mode: _FloatLike_co,
right: _FloatLike_co,
size: None = ...,
size: None = None,
) -> float: ... # type: ignore[misc]
@overload
def triangular(
@@ -766,46 +767,46 @@ class Generator:
left: _ArrayLikeFloat_co,
mode: _ArrayLikeFloat_co,
right: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc]
@overload
def binomial(
self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[int64]: ...
@overload
def negative_binomial(
self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...
self, n: _FloatLike_co, p: _FloatLike_co, size: None = None
) -> int: ... # type: ignore[misc]
@overload
def negative_binomial(
self,
n: _ArrayLikeFloat_co,
p: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[int64]: ...
@overload
def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc]
def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... # type: ignore[misc]
@overload
def poisson(
self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...
self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None
) -> NDArray[int64]: ...
@overload
def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc]
@overload
def zipf(
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[int64]: ...
@overload
def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc]
@overload
def geometric(
self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[int64]: ...
@overload
def hypergeometric(
self, ngood: int, nbad: int, nsample: int, size: None = ...
self, ngood: int, nbad: int, nsample: int, size: None = None
) -> int: ... # type: ignore[misc]
@overload
def hypergeometric(
@@ -813,44 +814,49 @@ class Generator:
ngood: _ArrayLikeInt_co,
nbad: _ArrayLikeInt_co,
nsample: _ArrayLikeInt_co,
size: _ShapeLike | None = ...,
size: _ShapeLike | None = None,
) -> NDArray[int64]: ...
@overload
def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc]
@overload
def logseries(
self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[int64]: ...
def multivariate_normal(
self,
mean: _ArrayLikeFloat_co,
cov: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...,
check_valid: Literal["warn", "raise", "ignore"] = ...,
tol: float = ...,
size: _ShapeLike | None = None,
check_valid: Literal["warn", "raise", "ignore"] = "warn",
tol: float = 1e-8,
*,
method: Literal["svd", "eigh", "cholesky"] = ...,
method: Literal["svd", "eigh", "cholesky"] = "svd",
) -> NDArray[float64]: ...
def multinomial(
self, n: _ArrayLikeInt_co,
pvals: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[int64]: ...
def multivariate_hypergeometric(
self,
colors: _ArrayLikeInt_co,
nsample: int,
size: _ShapeLike | None = ...,
method: Literal["marginals", "count"] = ...,
size: _ShapeLike | None = None,
method: Literal["marginals", "count"] = "marginals",
) -> NDArray[int64]: ...
def dirichlet(
self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
def permuted(
self, x: ArrayLike, *, axis: int | None = ..., out: NDArray[Any] | None = ...
self, x: ArrayLike, *, axis: int | None = None, out: NDArray[Any] | None = None
) -> NDArray[Any]: ...
def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ...
# axis must be 0 for MutableSequence
@overload
def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ...
@overload
def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ...
def default_rng(
seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = ...
seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None
) -> Generator: ...
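The net effect of the stub changes above is that type checkers now see the real runtime defaults (None, 0.0, 1.0, "zig", and so on) instead of the `...` sentinel, and `shuffle` gains a `MutableSequence` overload that pins `axis` to 0. A small sketch of the standard `numpy.random` API these overloads describe (illustrative only, not part of the commit):
import numpy as np
rng = np.random.default_rng()                   # seed defaults to None
x = rng.standard_normal()                       # size=None overload -> float
a = rng.standard_normal(size=(2, 3))            # shape overload -> NDArray[float64]
e = rng.standard_exponential(5, method="inv")   # method defaults to "zig" in the stub
arr = np.arange(6).reshape(2, 3)
rng.shuffle(arr, axis=1)                        # ndarray overload: any axis is allowed
rng.shuffle([1, 2, 3, 4])                       # MutableSequence overload: axis must stay 0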

View File

@@ -5,6 +5,8 @@ from numpy._typing import _ArrayLikeInt_co
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy.typing import NDArray
__all__ = ["MT19937"]
@type_check_only
class _MT19937Internal(TypedDict):
key: NDArray[uint32]
@@ -18,8 +20,8 @@ class _MT19937State(TypedDict):
class MT19937(BitGenerator):
def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ...
def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ...
def jumped(self, jumps: int = ...) -> MT19937: ...
@property
def jumped(self, jumps: int = 1) -> MT19937: ...
@property # type: ignore[override]
def state(self) -> _MT19937State: ...
@state.setter
def state(self, value: _MT19937State) -> None: ...
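The same pattern (a concrete default for `jumped` and a `# type: ignore[override]` on the narrowed `state` property) repeats in the other bit-generator stubs below. A short sketch of the standard API being annotated (not part of the commit):
from numpy.random import MT19937, Generator
bg = MT19937(12345)
bg_jumped = bg.jumped()        # jumps defaults to 1
state = bg.state               # typed as the _MT19937State TypedDict
bg_jumped.state = state        # the setter accepts the same TypedDict
rng = Generator(bg_jumped)
rng.random()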

View File

@@ -3,6 +3,8 @@ from typing import TypedDict, type_check_only
from numpy._typing import _ArrayLikeInt_co
from numpy.random.bit_generator import BitGenerator, SeedSequence
__all__ = ["PCG64"]
@type_check_only
class _PCG64Internal(TypedDict):
state: int
@@ -17,8 +19,8 @@ class _PCG64State(TypedDict):
class PCG64(BitGenerator):
def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ...
def jumped(self, jumps: int = ...) -> PCG64: ...
@property
def jumped(self, jumps: int = 1) -> PCG64: ...
@property # type: ignore[override]
def state(
self,
) -> _PCG64State: ...
@@ -31,14 +33,9 @@ class PCG64(BitGenerator):
class PCG64DXSM(BitGenerator):
def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ...
def jumped(self, jumps: int = ...) -> PCG64DXSM: ...
@property
def state(
self,
) -> _PCG64State: ...
def jumped(self, jumps: int = 1) -> PCG64DXSM: ...
@property # type: ignore[override]
def state(self) -> _PCG64State: ...
@state.setter
def state(
self,
value: _PCG64State,
) -> None: ...
def state(self, value: _PCG64State) -> None: ...
def advance(self, delta: int) -> PCG64DXSM: ...

View File

@@ -5,6 +5,8 @@ from numpy._typing import _ArrayLikeInt_co
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy.typing import NDArray
__all__ = ["Philox"]
@type_check_only
class _PhiloxInternal(TypedDict):
counter: NDArray[uint64]
@@ -26,14 +28,9 @@ class Philox(BitGenerator):
counter: _ArrayLikeInt_co | None = ...,
key: _ArrayLikeInt_co | None = ...,
) -> None: ...
@property
def state(
self,
) -> _PhiloxState: ...
@property # type: ignore[override]
def state(self) -> _PhiloxState: ...
@state.setter
def state(
self,
value: _PhiloxState,
) -> None: ...
def jumped(self, jumps: int = ...) -> Philox: ...
def state(self, value: _PhiloxState) -> None: ...
def jumped(self, jumps: int = 1) -> Philox: ...
def advance(self, delta: int) -> Philox: ...

View File

@@ -4,6 +4,8 @@ from numpy import uint64
from numpy._typing import NDArray, _ArrayLikeInt_co
from numpy.random.bit_generator import BitGenerator, SeedSequence
__all__ = ["SFC64"]
@type_check_only
class _SFC64Internal(TypedDict):
state: NDArray[uint64]
@@ -17,12 +19,7 @@ class _SFC64State(TypedDict):
class SFC64(BitGenerator):
def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ...
@property
def state(
self,
) -> _SFC64State: ...
@property # type: ignore[override]
def state(self) -> _SFC64State: ...
@state.setter
def state(
self,
value: _SFC64State,
) -> None: ...
def state(self, value: _SFC64State) -> None: ...

View File

@@ -31,5 +31,5 @@ cdef class SeedSequence():
np.ndarray[np.npy_uint32, ndim=1] entropy_array)
cdef get_assembled_entropy(self)
cdef class SeedlessSequence():
cdef class SeedlessSeedSequence:
pass

View File

@@ -1,4 +1,5 @@
import abc
from _typeshed import Incomplete
from collections.abc import Callable, Mapping, Sequence
from threading import Lock
from typing import (
@@ -12,8 +13,6 @@ from typing import (
overload,
type_check_only,
)
from _typeshed import Incomplete
from typing_extensions import CapsuleType
import numpy as np

View File

@@ -42,6 +42,62 @@ from numpy._typing import (
)
from numpy.random.bit_generator import BitGenerator
__all__ = [
"RandomState",
"beta",
"binomial",
"bytes",
"chisquare",
"choice",
"dirichlet",
"exponential",
"f",
"gamma",
"geometric",
"get_bit_generator",
"get_state",
"gumbel",
"hypergeometric",
"laplace",
"logistic",
"lognormal",
"logseries",
"multinomial",
"multivariate_normal",
"negative_binomial",
"noncentral_chisquare",
"noncentral_f",
"normal",
"pareto",
"permutation",
"poisson",
"power",
"rand",
"randint",
"randn",
"random",
"random_integers",
"random_sample",
"ranf",
"rayleigh",
"sample",
"seed",
"set_bit_generator",
"set_state",
"shuffle",
"standard_cauchy",
"standard_exponential",
"standard_gamma",
"standard_normal",
"standard_t",
"triangular",
"uniform",
"vonmises",
"wald",
"weibull",
"zipf",
]
class RandomState:
_bit_generator: BitGenerator
def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = ...) -> None: ...
@@ -50,45 +106,45 @@ class RandomState:
def __getstate__(self) -> dict[str, Any]: ...
def __setstate__(self, state: dict[str, Any]) -> None: ...
def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... # noqa: E501
def seed(self, seed: _ArrayLikeFloat_co | None = ...) -> None: ...
def seed(self, seed: _ArrayLikeFloat_co | None = None) -> None: ...
@overload
def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ...
def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ...
@overload
def get_state(
self, legacy: Literal[True] = ...
self, legacy: Literal[True] = True
) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ...
def set_state(
self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]
) -> None: ...
@overload
def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc]
def random_sample(self, size: None = None) -> float: ... # type: ignore[misc]
@overload
def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ...
@overload
def random(self, size: None = ...) -> float: ... # type: ignore[misc]
def random(self, size: None = None) -> float: ... # type: ignore[misc]
@overload
def random(self, size: _ShapeLike) -> NDArray[float64]: ...
@overload
def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
def beta(self, a: float, b: float, size: None = None) -> float: ... # type: ignore[misc]
@overload
def beta(
self,
a: _ArrayLikeFloat_co,
b: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
def exponential(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc]
@overload
def exponential(
self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...
self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc]
def standard_exponential(self, size: None = None) -> float: ... # type: ignore[misc]
@overload
def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ...
@overload
def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc]
def tomaxint(self, size: None = None) -> int: ... # type: ignore[misc]
@overload
# Generates long values, but stores it in a 64bit int:
def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ...
@@ -96,222 +152,222 @@ class RandomState:
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
) -> int: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: type[bool] = ...,
) -> bool: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: type[np.bool] = ...,
) -> np.bool: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: type[int] = ...,
) -> int: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501
) -> uint8: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501
) -> uint16: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501
) -> uint32: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501
) -> uint: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501
) -> ulong: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501
) -> uint64: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501
) -> int8: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501
) -> int16: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501
) -> int32: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501
) -> int_: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501
) -> long: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: int | None = ...,
size: None = ...,
high: int | None = None,
size: None = None,
dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501
) -> int64: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
) -> NDArray[long]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: _DTypeLikeBool = ...,
) -> NDArray[np.bool]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501
) -> NDArray[int8]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501
) -> NDArray[int16]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501
) -> NDArray[int32]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501
) -> NDArray[int64]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501
) -> NDArray[uint8]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501
) -> NDArray[uint16]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501
) -> NDArray[uint32]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501
) -> NDArray[uint64]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501
) -> NDArray[long]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501
) -> NDArray[ulong]: ...
def bytes(self, length: int) -> builtins.bytes: ...
@@ -319,44 +375,44 @@ class RandomState:
def choice(
self,
a: int,
size: None = ...,
replace: bool = ...,
p: _ArrayLikeFloat_co | None = ...,
size: None = None,
replace: bool = True,
p: _ArrayLikeFloat_co | None = None,
) -> int: ...
@overload
def choice(
self,
a: int,
size: _ShapeLike = ...,
replace: bool = ...,
p: _ArrayLikeFloat_co | None = ...,
size: _ShapeLike | None = None,
replace: bool = True,
p: _ArrayLikeFloat_co | None = None,
) -> NDArray[long]: ...
@overload
def choice(
self,
a: ArrayLike,
size: None = ...,
replace: bool = ...,
p: _ArrayLikeFloat_co | None = ...,
size: None = None,
replace: bool = True,
p: _ArrayLikeFloat_co | None = None,
) -> Any: ...
@overload
def choice(
self,
a: ArrayLike,
size: _ShapeLike = ...,
replace: bool = ...,
p: _ArrayLikeFloat_co | None = ...,
size: _ShapeLike | None = None,
replace: bool = True,
p: _ArrayLikeFloat_co | None = None,
) -> NDArray[Any]: ...
@overload
def uniform(
self, low: float = ..., high: float = ..., size: None = ...
self, low: float = 0.0, high: float = 1.0, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def uniform(
self,
low: _ArrayLikeFloat_co = ...,
high: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
low: _ArrayLikeFloat_co = 0.0,
high: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def rand(self) -> float: ...
@@ -368,65 +424,65 @@ class RandomState:
def randn(self, *args: int) -> NDArray[float64]: ...
@overload
def random_integers(
self, low: int, high: int | None = ..., size: None = ...
self, low: int, high: int | None = None, size: None = None
) -> int: ... # type: ignore[misc]
@overload
def random_integers(
self,
low: _ArrayLikeInt_co,
high: _ArrayLikeInt_co | None = ...,
size: _ShapeLike | None = ...,
high: _ArrayLikeInt_co | None = None,
size: _ShapeLike | None = None,
) -> NDArray[long]: ...
@overload
def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc]
def standard_normal(self, size: None = None) -> float: ... # type: ignore[misc]
@overload
def standard_normal( # type: ignore[misc]
self, size: _ShapeLike = ...
self, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def normal(
self, loc: float = ..., scale: float = ..., size: None = ...
self, loc: float = 0.0, scale: float = 1.0, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def normal(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
loc: _ArrayLikeFloat_co = 0.0,
scale: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def standard_gamma( # type: ignore[misc]
self,
shape: float,
size: None = ...,
size: None = None,
) -> float: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc]
@overload
def gamma(
self,
shape: _ArrayLikeFloat_co,
scale: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
scale: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... # type: ignore[misc]
@overload
def f(
self,
dfnum: _ArrayLikeFloat_co,
dfden: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def noncentral_f(
self, dfnum: float, dfden: float, nonc: float, size: None = ...
self, dfnum: float, dfden: float, nonc: float, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def noncentral_f(
@@ -434,128 +490,128 @@ class RandomState:
dfnum: _ArrayLikeFloat_co,
dfden: _ArrayLikeFloat_co,
nonc: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
def chisquare(self, df: float, size: None = None) -> float: ... # type: ignore[misc]
@overload
def chisquare(
self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def noncentral_chisquare(
self, df: float, nonc: float, size: None = ...
self, df: float, nonc: float, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def noncentral_chisquare(
self,
df: _ArrayLikeFloat_co,
nonc: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
def standard_t(self, df: float, size: None = None) -> float: ... # type: ignore[misc]
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: None = ...
self, df: _ArrayLikeFloat_co, size: None = None
) -> NDArray[float64]: ...
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... # type: ignore[misc]
@overload
def vonmises(
self,
mu: _ArrayLikeFloat_co,
kappa: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
def pareto(self, a: float, size: None = None) -> float: ... # type: ignore[misc]
@overload
def pareto(
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
def weibull(self, a: float, size: None = None) -> float: ... # type: ignore[misc]
@overload
def weibull(
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
def power(self, a: float, size: None = None) -> float: ... # type: ignore[misc]
@overload
def power(
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc]
@overload
def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ...
def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ...
@overload
def laplace(
self, loc: float = ..., scale: float = ..., size: None = ...
self, loc: float = 0.0, scale: float = 1.0, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def laplace(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
loc: _ArrayLikeFloat_co = 0.0,
scale: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def gumbel(
self, loc: float = ..., scale: float = ..., size: None = ...
self, loc: float = 0.0, scale: float = 1.0, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def gumbel(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
loc: _ArrayLikeFloat_co = 0.0,
scale: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def logistic(
self, loc: float = ..., scale: float = ..., size: None = ...
self, loc: float = 0.0, scale: float = 1.0, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def logistic(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
loc: _ArrayLikeFloat_co = 0.0,
scale: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def lognormal(
self, mean: float = ..., sigma: float = ..., size: None = ...
self, mean: float = 0.0, sigma: float = 1.0, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def lognormal(
self,
mean: _ArrayLikeFloat_co = ...,
sigma: _ArrayLikeFloat_co = ...,
size: _ShapeLike | None = ...,
mean: _ArrayLikeFloat_co = 0.0,
sigma: _ArrayLikeFloat_co = 1.0,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc]
@overload
def rayleigh(
self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...
self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
def wald(self, mean: float, scale: float, size: None = None) -> float: ... # type: ignore[misc]
@overload
def wald(
self,
mean: _ArrayLikeFloat_co,
scale: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[float64]: ...
@overload
def triangular(
self, left: float, mode: float, right: float, size: None = ...
self, left: float, mode: float, right: float, size: None = None
) -> float: ... # type: ignore[misc]
@overload
def triangular(
@@ -563,50 +619,50 @@ class RandomState:
left: _ArrayLikeFloat_co,
mode: _ArrayLikeFloat_co,
right: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...,
size: _ShapeLike | None = None,
) -> NDArray[float64]: ...
@overload
def binomial(
self, n: int, p: float, size: None = ...
self, n: int, p: float, size: None = None
) -> int: ... # type: ignore[misc]
@overload
def binomial(
self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[long]: ...
@overload
def negative_binomial(
self, n: float, p: float, size: None = ...
self, n: float, p: float, size: None = None
) -> int: ... # type: ignore[misc]
@overload
def negative_binomial(
self,
n: _ArrayLikeFloat_co,
p: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[long]: ...
@overload
def poisson(
self, lam: float = ..., size: None = ...
self, lam: float = 1.0, size: None = None
) -> int: ... # type: ignore[misc]
@overload
def poisson(
self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...
self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None
) -> NDArray[long]: ...
@overload
def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
def zipf(self, a: float, size: None = None) -> int: ... # type: ignore[misc]
@overload
def zipf(
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[long]: ...
@overload
def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
def geometric(self, p: float, size: None = None) -> int: ... # type: ignore[misc]
@overload
def geometric(
self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[long]: ...
@overload
def hypergeometric(
self, ngood: int, nbad: int, nsample: int, size: None = ...
self, ngood: int, nbad: int, nsample: int, size: None = None
) -> int: ... # type: ignore[misc]
@overload
def hypergeometric(
@@ -614,29 +670,29 @@ class RandomState:
ngood: _ArrayLikeInt_co,
nbad: _ArrayLikeInt_co,
nsample: _ArrayLikeInt_co,
size: _ShapeLike | None = ...,
size: _ShapeLike | None = None,
) -> NDArray[long]: ...
@overload
def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
def logseries(self, p: float, size: None = None) -> int: ... # type: ignore[misc]
@overload
def logseries(
self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[long]: ...
def multivariate_normal(
self,
mean: _ArrayLikeFloat_co,
cov: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...,
check_valid: Literal["warn", "raise", "ignore"] = ...,
tol: float = ...,
size: _ShapeLike | None = None,
check_valid: Literal["warn", "raise", "ignore"] = "warn",
tol: float = 1e-8,
) -> NDArray[float64]: ...
def multinomial(
self, n: _ArrayLikeInt_co,
pvals: _ArrayLikeFloat_co,
size: _ShapeLike | None = ...
size: _ShapeLike | None = None
) -> NDArray[long]: ...
def dirichlet(
self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ...
self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None
) -> NDArray[float64]: ...
def shuffle(self, x: ArrayLike) -> None: ...
@overload
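As with `Generator`, the legacy `RandomState` stub now spells out its runtime defaults, including the `get_state` overloads. A brief sketch of the corresponding standard API (illustrative only, not part of the commit):
import numpy as np
rs = np.random.RandomState(1234)
d = rs.get_state(legacy=False)   # dict overload
t = rs.get_state(legacy=True)    # legacy tuple: ('MT19937', key, pos, has_gauss, cached_gaussian)
rs.set_state(t)                  # set_state accepts either form
x = rs.uniform()                 # low=0.0, high=1.0, size=None -> float
arr = rs.randint(0, 10, size=5)  # dtype defaults to int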

View File

@@ -572,6 +572,9 @@ class TestDefaultRNG:
assert rg2 is rg
assert rg2.bit_generator is bg
@pytest.mark.thread_unsafe(
reason="np.random.set_bit_generator affects global state"
)
def test_coercion_RandomState_Generator(self):
# use default_rng to coerce RandomState to Generator
rs = RandomState(1234)

View File

@@ -57,6 +57,10 @@ else:
@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64',
reason='Meson unable to find MSVC linker on win-arm64')
@pytest.mark.slow
@pytest.mark.thread_unsafe(
reason="building cython code in a subprocess doesn't make sense to do in many "
"threads and sometimes crashes"
)
def test_cython(tmp_path):
import glob
# build the examples in a temporary directory

View File

@@ -1,6 +1,7 @@
import hashlib
import os.path
import sys
import warnings
import pytest
@@ -17,8 +18,6 @@ from numpy.testing import (
assert_equal,
assert_no_warnings,
assert_raises,
assert_warns,
suppress_warnings,
)
random = Generator(MT19937())
@@ -100,6 +99,24 @@ class TestBinomial:
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
def test_p_extremely_small(self):
n = 50000000000
p = 5e-17
sample_size = 20000000
x = random.binomial(n, p, size=sample_size)
sample_mean = x.mean()
expected_mean = n * p
sigma = np.sqrt(n * p * (1 - p) / sample_size)
# Note: the parameters were chosen so that expected_mean - 6*sigma
# is a positive value. The first `assert` below validates that
# assumption (in case someone edits the parameters in the future).
# The second `assert` is the actual test.
low_bound = expected_mean - 6 * sigma
assert low_bound > 0, "bad test params: 6-sigma lower bound is negative"
test_msg = (f"sample mean {sample_mean} deviates from the expected mean "
f"{expected_mean} by more than 6*sigma")
assert abs(expected_mean - sample_mean) < 6 * sigma, test_msg
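A quick check of the comment's claim that the 6-sigma lower bound stays positive for these parameters (illustrative arithmetic, not part of the commit):
import math
n, p, sample_size = 50_000_000_000, 5e-17, 20_000_000
expected_mean = n * p                             # 2.5e-06
sigma = math.sqrt(n * p * (1 - p) / sample_size)  # about 3.54e-07
assert expected_mean - 6 * sigma > 0              # about 3.79e-07, so the bound is positive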
class TestMultinomial:
def test_basic(self):
@@ -158,8 +175,7 @@ class TestMultinomial:
class TestMultivariateHypergeometric:
def setup_method(self):
self.seed = 8675309
seed = 8675309
def test_argument_validation(self):
# Error cases...
@@ -291,37 +307,40 @@ class TestMultivariateHypergeometric:
class TestSetState:
def setup_method(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed))
self.bit_generator = self.rg.bit_generator
self.state = self.bit_generator.state
self.legacy_state = (self.state['bit_generator'],
self.state['state']['key'],
self.state['state']['pos'])
def _create_rng(self):
seed = 1234567890
rg = Generator(MT19937(seed))
bit_generator = rg.bit_generator
state = bit_generator.state
legacy_state = (state['bit_generator'],
state['state']['key'],
state['state']['pos'])
return rg, bit_generator, state
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.rg.standard_normal(size=3)
self.bit_generator.state = self.state
new = self.rg.standard_normal(size=3)
rg, bit_generator, state = self._create_rng()
old = rg.standard_normal(size=3)
bit_generator.state = state
new = rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.rg.standard_normal()
state = self.bit_generator.state
old = self.rg.standard_normal(size=3)
self.bit_generator.state = state
new = self.rg.standard_normal(size=3)
rg, bit_generator, state = self._create_rng()
rg.standard_normal()
state = bit_generator.state
old = rg.standard_normal(size=3)
bit_generator.state = state
new = rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.rg.negative_binomial(0.5, 0.5)
rg, _, _ = self._create_rng()
rg.negative_binomial(0.5, 0.5)
class TestIntegers:
@@ -719,9 +738,7 @@ class TestIntegers:
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
def setup_method(self):
self.seed = 1234567890
seed = 1234567890
def test_integers(self):
random = Generator(MT19937(self.seed))
@@ -1249,6 +1266,7 @@ class TestRandomDist:
assert_array_almost_equal(actual, expected, decimal=15)
@pytest.mark.slow
@pytest.mark.thread_unsafe(reason="crashes with low memory")
def test_dirichlet_moderately_small_alpha(self):
# Use alpha.max() < 0.1 to trigger stick breaking code path
alpha = np.array([0.02, 0.04, 0.03])
@@ -1463,8 +1481,8 @@ class TestRandomDist:
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov)
pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov,
method='eigh')
assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
method='cholesky')
@@ -1491,10 +1509,9 @@ class TestRandomDist:
method='cholesky')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
with warnings.catch_warnings():
warnings.simplefilter("error")
random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
@@ -1888,8 +1905,7 @@ class TestRandomDist:
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup_method(self):
self.seed = 123456789
seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
@@ -2501,8 +2517,7 @@ class TestBroadcast:
@pytest.mark.skipif(IS_WASM, reason="can't start thread")
class TestThread:
# make sure each state produces the same sequence even in threads
def setup_method(self):
self.seeds = range(4)
seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
@@ -2547,13 +2562,11 @@ class TestThread:
# See Issue #4263
class TestSingleEltArrayInput:
def setup_method(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def _create_arrays(self):
return np.array([2]), np.array([3]), np.array([4]), (1,)
def test_one_arg_funcs(self):
argOne, _, _, tgtShape = self._create_arrays()
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
@@ -2568,11 +2581,12 @@ class TestSingleEltArrayInput:
out = func(np.array([0.5]))
else:
out = func(self.argOne)
out = func(argOne)
assert_equal(out.shape, self.tgtShape)
assert_equal(out.shape, tgtShape)
def test_two_arg_funcs(self):
argOne, argTwo, _, tgtShape = self._create_arrays()
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
@@ -2588,18 +2602,19 @@ class TestSingleEltArrayInput:
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
argTwo = argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(argOne, argTwo)
assert_equal(out.shape, tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(argOne[0], argTwo)
assert_equal(out.shape, tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
out = func(argOne, argTwo[0])
assert_equal(out.shape, tgtShape)
def test_integers(self, endpoint):
_, _, _, tgtShape = self._create_arrays()
itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
@@ -2608,27 +2623,28 @@ class TestSingleEltArrayInput:
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
assert_equal(out.shape, tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
assert_equal(out.shape, tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
assert_equal(out.shape, tgtShape)
def test_three_arg_funcs(self):
argOne, argTwo, argThree, tgtShape = self._create_arrays()
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(argOne, argTwo, argThree)
assert_equal(out.shape, tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(argOne[0], argTwo, argThree)
assert_equal(out.shape, tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(argOne, argTwo[0], argThree)
assert_equal(out.shape, tgtShape)
@pytest.mark.parametrize("config", JUMP_TEST_DATA)

View File

@@ -6,30 +6,32 @@ from numpy.testing import assert_, assert_array_equal
class TestRegression:
def setup_method(self):
self.mt19937 = Generator(MT19937(121263137472525314065))
def _create_generator(self):
return Generator(MT19937(121263137472525314065))
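A note on the helper introduced above: because each call reseeds MT19937 with the same integer, every test draws from an identical, isolated stream regardless of execution order. A minimal standalone sketch of that property (illustrative, not part of the patch):

from numpy.random import Generator, MT19937

def _create_generator():
    # Same seed as the test class above; each call yields a fresh,
    # independent Generator with no state shared between tests.
    return Generator(MT19937(121263137472525314065))

a = _create_generator().standard_normal(3)
b = _create_generator().standard_normal(3)
assert (a == b).all()  # identical seed -> identical stream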
def test_vonmises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
mt19937 = self._create_generator()
for mu in np.linspace(-7., 7., 5):
r = self.mt19937.vonmises(mu, 1, 50)
r = mt19937.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0))
mt19937 = self._create_generator()
assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems
assert_(self.mt19937.hypergeometric(*args) > 0)
assert_(mt19937.hypergeometric(*args) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
mt19937 = self._create_generator()
N = 1000
rvsn = self.mt19937.logseries(0.8, size=N)
rvsn = mt19937.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
@@ -66,34 +68,39 @@ class TestRegression:
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
self.mt19937.multivariate_normal([0], [[0]], size=1)
self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
mt19937 = self._create_generator()
mt19937.multivariate_normal([0], [[0]], size=1)
mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
x = self.mt19937.beta(0.0001, 0.0001, size=100)
mt19937 = self._create_generator()
x = mt19937.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
def test_beta_very_small_parameters(self):
# gh-24203: beta would hang with very small parameters.
self.mt19937.beta(1e-49, 1e-40)
mt19937 = self._create_generator()
mt19937.beta(1e-49, 1e-40)
def test_beta_ridiculously_small_parameters(self):
# gh-24266: beta would generate nan when the parameters
# were subnormal or a small multiple of the smallest normal.
mt19937 = self._create_generator()
tiny = np.finfo(1.0).tiny
x = self.mt19937.beta(tiny / 32, tiny / 40, size=50)
x = mt19937.beta(tiny / 32, tiny / 40, size=50)
assert not np.any(np.isnan(x))
def test_beta_expected_zero_frequency(self):
# gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta
# would generate too many zeros.
mt19937 = self._create_generator()
a = 0.0025
b = 0.0025
n = 1000000
x = self.mt19937.beta(a, b, size=n)
x = mt19937.beta(a, b, size=n)
nzeros = np.count_nonzero(x == 0)
# beta CDF at x = np.finfo(np.double).smallest_subnormal/2
# is p = 0.0776169083131899, e.g,
@@ -114,24 +121,26 @@ class TestRegression:
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
mt19937 = self._create_generator()
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = self.mt19937.choice(a, p=probs)
c = mt19937.choice(a, p=probs)
assert_(c in a)
with pytest.raises(ValueError):
self.mt19937.choice(a, p=probs * 0.9)
mt19937.choice(a, p=probs * 0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
mt19937 = self._create_generator()
a = np.array(['a', 'a' * 1000])
for _ in range(100):
self.mt19937.shuffle(a)
mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
@@ -141,10 +150,11 @@ class TestRegression:
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
mt19937 = self._create_generator()
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
self.mt19937.shuffle(a)
mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
@@ -174,10 +184,11 @@ class TestRegression:
assert_array_equal(m.__array__(), np.arange(5))
def test_gamma_0(self):
assert self.mt19937.standard_gamma(0.0) == 0.0
assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0)
mt19937 = self._create_generator()
assert mt19937.standard_gamma(0.0) == 0.0
assert_array_equal(mt19937.standard_gamma([0.0]), 0.0)
actual = self.mt19937.standard_gamma([0.0], dtype='float')
actual = mt19937.standard_gamma([0.0], dtype='float')
expected = np.array([0.], dtype=np.float32)
assert_array_equal(actual, expected)
@@ -185,21 +196,24 @@ class TestRegression:
# Regression test for gh-17007.
# When p = 1e-30, the probability that a sample will exceed 2**63-1
# is 0.9999999999907766, so we expect the result to be all 2**63-1.
assert_array_equal(self.mt19937.geometric(p=1e-30, size=3),
mt19937 = self._create_generator()
assert_array_equal(mt19937.geometric(p=1e-30, size=3),
np.iinfo(np.int64).max)
def test_zipf_large_parameter(self):
# Regression test for part of gh-9829: a call such as rng.zipf(10000)
# would hang.
mt19937 = self._create_generator()
n = 8
sample = self.mt19937.zipf(10000, size=n)
sample = mt19937.zipf(10000, size=n)
assert_array_equal(sample, np.ones(n, dtype=np.int64))
def test_zipf_a_near_1(self):
# Regression test for gh-9829: a call such as rng.zipf(1.0000000000001)
# would hang.
mt19937 = self._create_generator()
n = 100000
sample = self.mt19937.zipf(1.0000000000001, size=n)
sample = mt19937.zipf(1.0000000000001, size=n)
# Not much of a test, but let's do something more than verify that
# it doesn't hang. Certainly for a monotonically decreasing
# discrete distribution truncated to signed 64 bit integers, more

View File

@@ -4,11 +4,7 @@ import pytest
import numpy as np
from numpy import random
from numpy.testing import (
assert_,
assert_array_equal,
assert_raises,
)
from numpy.testing import assert_, assert_array_equal, assert_raises
class TestRegression:
@@ -58,9 +54,9 @@ class TestRegression:
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
random.seed(12345)
rng = random.RandomState(12345)
shuffled = list(t)
random.shuffle(shuffled)
rng.shuffle(shuffled)
expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
@@ -135,9 +131,9 @@ class TestRegression:
class N(np.ndarray):
pass
random.seed(1)
rng = random.RandomState(1)
orig = np.arange(3).view(N)
perm = random.permutation(orig)
perm = rng.permutation(orig)
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
@@ -147,9 +143,9 @@ class TestRegression:
def __array__(self, dtype=None, copy=None):
return self.a
random.seed(1)
rng = random.RandomState(1)
m = M()
perm = random.permutation(m)
perm = rng.permutation(m)
assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
assert_array_equal(m.__array__(), np.arange(5))
@@ -180,27 +176,27 @@ class TestRegression:
reason='Cannot test with 32-bit C long')
def test_randint_117(self):
# GH 14189
random.seed(0)
rng = random.RandomState(0)
expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
2588848963, 3684848379, 2340255427, 3638918503,
1819583497, 2678185683], dtype='int64')
actual = random.randint(2**32, size=10)
actual = rng.randint(2**32, size=10)
assert_array_equal(actual, expected)
def test_p_zero_stream(self):
# Regression test for gh-14522. Ensure that future versions
# generate the same variates as version 1.16.
np.random.seed(12345)
assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
rng = random.RandomState(12345)
assert_array_equal(rng.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
[0, 0, 0, 1, 1])
def test_n_zero_stream(self):
# Regression test for gh-14522. Ensure that future versions
# generate the same variates as version 1.16.
np.random.seed(8675309)
rng = random.RandomState(8675309)
expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
assert_array_equal(rng.binomial([[0], [10]], 0.25, size=(2, 10)),
expected)
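The hunks above replace calls to the module-level random.seed with a local RandomState instance, so the tests no longer mutate global state. A small sketch of the equivalence, assuming the legacy RandomState stream (illustrative, not part of the patch):

import numpy as np

np.random.seed(12345)                 # old style: seeds the global RandomState
a = np.random.binomial(10, 0.25, 5)

rng = np.random.RandomState(12345)    # new style: local, isolated state
b = rng.binomial(10, 0.25, 5)

assert (a == b).all()                 # same seed, same legacy stream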

View File

@@ -1,12 +1,11 @@
import inspect
import sys
import pytest
import numpy as np
from numpy import random
from numpy.testing import (
assert_,
assert_array_equal,
assert_raises,
)
from numpy.testing import IS_PYPY, assert_, assert_array_equal, assert_raises
class TestRegression:
@@ -56,9 +55,9 @@ class TestRegression:
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
np.random.seed(12345)
rng = np.random.RandomState(12345)
shuffled = list(t)
random.shuffle(shuffled)
rng.shuffle(shuffled)
expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
@@ -133,9 +132,9 @@ class TestRegression:
class N(np.ndarray):
pass
np.random.seed(1)
rng = np.random.RandomState(1)
orig = np.arange(3).view(N)
perm = np.random.permutation(orig)
perm = rng.permutation(orig)
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
@@ -145,8 +144,32 @@ class TestRegression:
def __array__(self, dtype=None, copy=None):
return self.a
np.random.seed(1)
rng = np.random.RandomState(1)
m = M()
perm = np.random.permutation(m)
perm = rng.permutation(m)
assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
assert_array_equal(m.__array__(), np.arange(5))
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc")
@pytest.mark.parametrize(
"cls",
[
random.Generator,
random.MT19937,
random.PCG64,
random.PCG64DXSM,
random.Philox,
random.RandomState,
random.SFC64,
random.BitGenerator,
random.SeedSequence,
random.bit_generator.SeedlessSeedSequence,
],
)
def test_inspect_signature(self, cls: type) -> None:
assert hasattr(cls, "__text_signature__")
try:
inspect.signature(cls)
except ValueError:
pytest.fail(f"invalid signature: {cls.__module__}.{cls.__qualname__}")

View File

@@ -1,4 +1,5 @@
import pickle
from dataclasses import dataclass
from functools import partial
import pytest
@@ -7,12 +8,8 @@ import numpy as np
from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Generator, Philox
from numpy.testing import assert_, assert_array_equal, assert_equal
@pytest.fixture(scope='module',
params=(np.bool, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64))
def dtype(request):
return request.param
DTYPES_BOOL_INT_UINT = (np.bool, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64)
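The module-level tuple above replaces the old dtype fixture; the integer tests further down consume it through @pytest.mark.parametrize. A self-contained sketch of that consumption, with a hypothetical test name for illustration:

import numpy as np
import pytest

DTYPES_BOOL_INT_UINT = (np.bool, np.int8, np.int16, np.int32, np.int64,
                        np.uint8, np.uint16, np.uint32, np.uint64)

@pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT)
def test_integers_dtype(dtype):
    # an exclusive upper bound of 2 keeps the draw valid for np.bool as well
    rng = np.random.default_rng(12345)
    out = rng.integers(0, 2, size=3, dtype=dtype)
    assert out.dtype == dtype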
def params_0(f):
@@ -92,403 +89,459 @@ def warmup(rg, n=None):
rg.random(n, dtype=np.float32)
@dataclass
class RNGData:
bit_generator: type[np.random.BitGenerator]
advance: int
seed: list[int]
rg: Generator
seed_vector_bits: int
class RNG:
@classmethod
def setup_class(cls):
def _create_rng(cls):
# Overridden in test classes. Place holder to silence IDE noise
cls.bit_generator = PCG64
cls.advance = None
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
@classmethod
def _extra_setup(cls):
cls.vec_1d = np.arange(2.0, 102.0)
cls.vec_2d = np.arange(2.0, 102.0)[None, :]
cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))
cls.seed_error = TypeError
def _reset_state(self):
self.rg.bit_generator.state = self.initial_state
bit_generator = PCG64
advance = None
seed = [12345]
rg = Generator(bit_generator(*seed))
seed_vector_bits = 64
return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
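The base-class stub above is overridden per bit generator further down; each test builds its own RNGData instead of reading class attributes set in setup_class. A compact sketch of the pattern in isolation (fields mirror the dataclass in this hunk, with advance widened to int | None since the stub passes None):

from dataclasses import dataclass
import numpy as np
from numpy.random import Generator, PCG64

@dataclass
class RNGData:
    bit_generator: type[np.random.BitGenerator]
    advance: int | None
    seed: list[int]
    rg: Generator
    seed_vector_bits: int

def _create_rng() -> RNGData:
    seed = [12345]
    return RNGData(PCG64, None, seed, Generator(PCG64(*seed)), 64)

data = _create_rng()                       # fresh, isolated state per call
assert isinstance(data.rg.bit_generator, data.bit_generator)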
def test_init(self):
rg = Generator(self.bit_generator())
state = rg.bit_generator.state
rg.standard_normal(1)
rg.standard_normal(1)
rg.bit_generator.state = state
new_state = rg.bit_generator.state
data = self._create_rng()
data.rg = Generator(data.bit_generator())
state = data.rg.bit_generator.state
data.rg.standard_normal(1)
data.rg.standard_normal(1)
data.rg.bit_generator.state = state
new_state = data.rg.bit_generator.state
assert_(comp_state(state, new_state))
def test_advance(self):
state = self.rg.bit_generator.state
if hasattr(self.rg.bit_generator, 'advance'):
self.rg.bit_generator.advance(self.advance)
assert_(not comp_state(state, self.rg.bit_generator.state))
data = self._create_rng()
state = data.rg.bit_generator.state
if hasattr(data.rg.bit_generator, 'advance'):
data.rg.bit_generator.advance(data.advance)
assert_(not comp_state(state, data.rg.bit_generator.state))
else:
bitgen_name = self.rg.bit_generator.__class__.__name__
bitgen_name = data.rg.bit_generator.__class__.__name__
pytest.skip(f'Advance is not supported by {bitgen_name}')
def test_jump(self):
state = self.rg.bit_generator.state
if hasattr(self.rg.bit_generator, 'jumped'):
bit_gen2 = self.rg.bit_generator.jumped()
rg = self._create_rng().rg
state = rg.bit_generator.state
if hasattr(rg.bit_generator, 'jumped'):
bit_gen2 = rg.bit_generator.jumped()
jumped_state = bit_gen2.state
assert_(not comp_state(state, jumped_state))
self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
self.rg.bit_generator.state = state
bit_gen3 = self.rg.bit_generator.jumped()
rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
rg.bit_generator.state = state
bit_gen3 = rg.bit_generator.jumped()
rejumped_state = bit_gen3.state
assert_(comp_state(jumped_state, rejumped_state))
else:
bitgen_name = self.rg.bit_generator.__class__.__name__
bitgen_name = rg.bit_generator.__class__.__name__
if bitgen_name not in ('SFC64',):
raise AttributeError(f'no "jumped" in {bitgen_name}')
pytest.skip(f'Jump is not supported by {bitgen_name}')
def test_uniform(self):
r = self.rg.uniform(-1.0, 0.0, size=10)
rg = self._create_rng().rg
r = rg.uniform(-1.0, 0.0, size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
def test_uniform_array(self):
r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
rg = self._create_rng().rg
r = rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
r = self.rg.uniform(np.array([-1.0] * 10),
r = rg.uniform(np.array([-1.0] * 10),
np.array([0.0] * 10), size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10)
r = rg.uniform(-1.0, np.array([0.0] * 10), size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
def test_random(self):
assert_(len(self.rg.random(10)) == 10)
params_0(self.rg.random)
rg = self._create_rng().rg
assert_(len(rg.random(10)) == 10)
params_0(rg.random)
def test_standard_normal_zig(self):
assert_(len(self.rg.standard_normal(10)) == 10)
rg = self._create_rng().rg
assert_(len(rg.standard_normal(10)) == 10)
def test_standard_normal(self):
assert_(len(self.rg.standard_normal(10)) == 10)
params_0(self.rg.standard_normal)
rg = self._create_rng().rg
assert_(len(rg.standard_normal(10)) == 10)
params_0(rg.standard_normal)
def test_standard_gamma(self):
assert_(len(self.rg.standard_gamma(10, 10)) == 10)
assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10)
params_1(self.rg.standard_gamma)
rg = self._create_rng().rg
assert_(len(rg.standard_gamma(10, 10)) == 10)
assert_(len(rg.standard_gamma(np.array([10] * 10), 10)) == 10)
params_1(rg.standard_gamma)
def test_standard_exponential(self):
assert_(len(self.rg.standard_exponential(10)) == 10)
params_0(self.rg.standard_exponential)
rg = self._create_rng().rg
assert_(len(rg.standard_exponential(10)) == 10)
params_0(rg.standard_exponential)
def test_standard_exponential_float(self):
randoms = self.rg.standard_exponential(10, dtype='float32')
rg = self._create_rng().rg
randoms = rg.standard_exponential(10, dtype='float32')
assert_(len(randoms) == 10)
assert randoms.dtype == np.float32
params_0(partial(self.rg.standard_exponential, dtype='float32'))
params_0(partial(rg.standard_exponential, dtype='float32'))
def test_standard_exponential_float_log(self):
randoms = self.rg.standard_exponential(10, dtype='float32',
rg = self._create_rng().rg
randoms = rg.standard_exponential(10, dtype='float32',
method='inv')
assert_(len(randoms) == 10)
assert randoms.dtype == np.float32
params_0(partial(self.rg.standard_exponential, dtype='float32',
params_0(partial(rg.standard_exponential, dtype='float32',
method='inv'))
def test_standard_cauchy(self):
assert_(len(self.rg.standard_cauchy(10)) == 10)
params_0(self.rg.standard_cauchy)
rg = self._create_rng().rg
assert_(len(rg.standard_cauchy(10)) == 10)
params_0(rg.standard_cauchy)
def test_standard_t(self):
assert_(len(self.rg.standard_t(10, 10)) == 10)
params_1(self.rg.standard_t)
rg = self._create_rng().rg
assert_(len(rg.standard_t(10, 10)) == 10)
params_1(rg.standard_t)
def test_binomial(self):
assert_(self.rg.binomial(10, .5) >= 0)
assert_(self.rg.binomial(1000, .5) >= 0)
rg = self._create_rng().rg
assert_(rg.binomial(10, .5) >= 0)
assert_(rg.binomial(1000, .5) >= 0)
def test_reset_state(self):
state = self.rg.bit_generator.state
int_1 = self.rg.integers(2**31)
self.rg.bit_generator.state = state
int_2 = self.rg.integers(2**31)
rg = self._create_rng().rg
state = rg.bit_generator.state
int_1 = rg.integers(2**31)
rg.bit_generator.state = state
int_2 = rg.integers(2**31)
assert_(int_1 == int_2)
def test_entropy_init(self):
rg = Generator(self.bit_generator())
rg2 = Generator(self.bit_generator())
bit_generator = self._create_rng().bit_generator
rg = Generator(bit_generator())
rg2 = Generator(bit_generator())
assert_(not comp_state(rg.bit_generator.state,
rg2.bit_generator.state))
def test_seed(self):
rg = Generator(self.bit_generator(*self.seed))
rg2 = Generator(self.bit_generator(*self.seed))
data = self._create_rng()
rg = Generator(data.bit_generator(*data.seed))
rg2 = Generator(data.bit_generator(*data.seed))
rg.random()
rg2.random()
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_reset_state_gauss(self):
rg = Generator(self.bit_generator(*self.seed))
data = self._create_rng()
rg = Generator(data.bit_generator(*data.seed))
rg.standard_normal()
state = rg.bit_generator.state
n1 = rg.standard_normal(size=10)
rg2 = Generator(self.bit_generator())
rg2 = Generator(data.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.standard_normal(size=10)
assert_array_equal(n1, n2)
def test_reset_state_uint32(self):
rg = Generator(self.bit_generator(*self.seed))
data = self._create_rng()
rg = Generator(data.bit_generator(*data.seed))
rg.integers(0, 2 ** 24, 120, dtype=np.uint32)
state = rg.bit_generator.state
n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32)
rg2 = Generator(self.bit_generator())
rg2 = Generator(data.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32)
assert_array_equal(n1, n2)
def test_reset_state_float(self):
rg = Generator(self.bit_generator(*self.seed))
data = self._create_rng()
rg = Generator(data.bit_generator(*data.seed))
rg.random(dtype='float32')
state = rg.bit_generator.state
n1 = rg.random(size=10, dtype='float32')
rg2 = Generator(self.bit_generator())
rg2 = Generator(data.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.random(size=10, dtype='float32')
assert_((n1 == n2).all())
def test_shuffle(self):
rg = self._create_rng().rg
original = np.arange(200, 0, -1)
permuted = self.rg.permutation(original)
permuted = rg.permutation(original)
assert_((original != permuted).any())
def test_permutation(self):
rg = self._create_rng().rg
original = np.arange(200, 0, -1)
permuted = self.rg.permutation(original)
permuted = rg.permutation(original)
assert_((original != permuted).any())
def test_beta(self):
vals = self.rg.beta(2.0, 2.0, 10)
rg = self._create_rng().rg
vals = rg.beta(2.0, 2.0, 10)
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), 2.0)
vals = rg.beta(np.array([2.0] * 10), 2.0)
assert_(len(vals) == 10)
vals = self.rg.beta(2.0, np.array([2.0] * 10))
vals = rg.beta(2.0, np.array([2.0] * 10))
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))
vals = rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))
vals = rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))
assert_(vals.shape == (10, 10))
def test_bytes(self):
vals = self.rg.bytes(10)
rg = self._create_rng().rg
vals = rg.bytes(10)
assert_(len(vals) == 10)
def test_chisquare(self):
vals = self.rg.chisquare(2.0, 10)
rg = self._create_rng().rg
vals = rg.chisquare(2.0, 10)
assert_(len(vals) == 10)
params_1(self.rg.chisquare)
params_1(rg.chisquare)
def test_exponential(self):
vals = self.rg.exponential(2.0, 10)
rg = self._create_rng().rg
vals = rg.exponential(2.0, 10)
assert_(len(vals) == 10)
params_1(self.rg.exponential)
params_1(rg.exponential)
def test_f(self):
vals = self.rg.f(3, 1000, 10)
rg = self._create_rng().rg
vals = rg.f(3, 1000, 10)
assert_(len(vals) == 10)
def test_gamma(self):
vals = self.rg.gamma(3, 2, 10)
rg = self._create_rng().rg
vals = rg.gamma(3, 2, 10)
assert_(len(vals) == 10)
def test_geometric(self):
vals = self.rg.geometric(0.5, 10)
rg = self._create_rng().rg
vals = rg.geometric(0.5, 10)
assert_(len(vals) == 10)
params_1(self.rg.exponential, bounded=True)
params_1(rg.exponential, bounded=True)
def test_gumbel(self):
vals = self.rg.gumbel(2.0, 2.0, 10)
rg = self._create_rng().rg
vals = rg.gumbel(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_laplace(self):
vals = self.rg.laplace(2.0, 2.0, 10)
rg = self._create_rng().rg
vals = rg.laplace(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_logitic(self):
vals = self.rg.logistic(2.0, 2.0, 10)
rg = self._create_rng().rg
vals = rg.logistic(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_logseries(self):
vals = self.rg.logseries(0.5, 10)
rg = self._create_rng().rg
vals = rg.logseries(0.5, 10)
assert_(len(vals) == 10)
def test_negative_binomial(self):
vals = self.rg.negative_binomial(10, 0.2, 10)
rg = self._create_rng().rg
vals = rg.negative_binomial(10, 0.2, 10)
assert_(len(vals) == 10)
def test_noncentral_chisquare(self):
vals = self.rg.noncentral_chisquare(10, 2, 10)
rg = self._create_rng().rg
vals = rg.noncentral_chisquare(10, 2, 10)
assert_(len(vals) == 10)
def test_noncentral_f(self):
vals = self.rg.noncentral_f(3, 1000, 2, 10)
rg = self._create_rng().rg
vals = rg.noncentral_f(3, 1000, 2, 10)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2)
vals = rg.noncentral_f(np.array([3] * 10), 1000, 2)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2)
vals = rg.noncentral_f(3, np.array([1000] * 10), 2)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10))
vals = rg.noncentral_f(3, 1000, np.array([2] * 10))
assert_(len(vals) == 10)
def test_normal(self):
vals = self.rg.normal(10, 0.2, 10)
rg = self._create_rng().rg
vals = rg.normal(10, 0.2, 10)
assert_(len(vals) == 10)
def test_pareto(self):
vals = self.rg.pareto(3.0, 10)
rg = self._create_rng().rg
vals = rg.pareto(3.0, 10)
assert_(len(vals) == 10)
def test_poisson(self):
vals = self.rg.poisson(10, 10)
rg = self._create_rng().rg
vals = rg.poisson(10, 10)
assert_(len(vals) == 10)
vals = self.rg.poisson(np.array([10] * 10))
vals = rg.poisson(np.array([10] * 10))
assert_(len(vals) == 10)
params_1(self.rg.poisson)
params_1(rg.poisson)
def test_power(self):
vals = self.rg.power(0.2, 10)
rg = self._create_rng().rg
vals = rg.power(0.2, 10)
assert_(len(vals) == 10)
def test_integers(self):
vals = self.rg.integers(10, 20, 10)
rg = self._create_rng().rg
vals = rg.integers(10, 20, 10)
assert_(len(vals) == 10)
def test_rayleigh(self):
vals = self.rg.rayleigh(0.2, 10)
rg = self._create_rng().rg
vals = rg.rayleigh(0.2, 10)
assert_(len(vals) == 10)
params_1(self.rg.rayleigh, bounded=True)
params_1(rg.rayleigh, bounded=True)
def test_vonmises(self):
vals = self.rg.vonmises(10, 0.2, 10)
rg = self._create_rng().rg
vals = rg.vonmises(10, 0.2, 10)
assert_(len(vals) == 10)
def test_wald(self):
vals = self.rg.wald(1.0, 1.0, 10)
rg = self._create_rng().rg
vals = rg.wald(1.0, 1.0, 10)
assert_(len(vals) == 10)
def test_weibull(self):
vals = self.rg.weibull(1.0, 10)
rg = self._create_rng().rg
vals = rg.weibull(1.0, 10)
assert_(len(vals) == 10)
def test_zipf(self):
vals = self.rg.zipf(10, 10)
rg = self._create_rng().rg
vec_1d = np.arange(2.0, 102.0)
vec_2d = np.arange(2.0, 102.0)[None, :]
mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))
vals = rg.zipf(10, 10)
assert_(len(vals) == 10)
vals = self.rg.zipf(self.vec_1d)
vals = rg.zipf(vec_1d)
assert_(len(vals) == 100)
vals = self.rg.zipf(self.vec_2d)
vals = rg.zipf(vec_2d)
assert_(vals.shape == (1, 100))
vals = self.rg.zipf(self.mat)
vals = rg.zipf(mat)
assert_(vals.shape == (100, 100))
def test_hypergeometric(self):
vals = self.rg.hypergeometric(25, 25, 20)
rg = self._create_rng().rg
vals = rg.hypergeometric(25, 25, 20)
assert_(np.isscalar(vals))
vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20)
vals = rg.hypergeometric(np.array([25] * 10), 25, 20)
assert_(vals.shape == (10,))
def test_triangular(self):
vals = self.rg.triangular(-5, 0, 5)
rg = self._create_rng().rg
vals = rg.triangular(-5, 0, 5)
assert_(np.isscalar(vals))
vals = self.rg.triangular(-5, np.array([0] * 10), 5)
vals = rg.triangular(-5, np.array([0] * 10), 5)
assert_(vals.shape == (10,))
def test_multivariate_normal(self):
rg = self._create_rng().rg
mean = [0, 0]
cov = [[1, 0], [0, 100]] # diagonal covariance
x = self.rg.multivariate_normal(mean, cov, 5000)
x = rg.multivariate_normal(mean, cov, 5000)
assert_(x.shape == (5000, 2))
x_zig = self.rg.multivariate_normal(mean, cov, 5000)
x_zig = rg.multivariate_normal(mean, cov, 5000)
assert_(x.shape == (5000, 2))
x_inv = self.rg.multivariate_normal(mean, cov, 5000)
x_inv = rg.multivariate_normal(mean, cov, 5000)
assert_(x.shape == (5000, 2))
assert_((x_zig != x_inv).any())
def test_multinomial(self):
vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3])
rg = self._create_rng().rg
vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3])
assert_(vals.shape == (2,))
vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)
vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)
assert_(vals.shape == (10, 2))
def test_dirichlet(self):
s = self.rg.dirichlet((10, 5, 3), 20)
rg = self._create_rng().rg
s = rg.dirichlet((10, 5, 3), 20)
assert_(s.shape == (20, 3))
def test_pickle(self):
pick = pickle.dumps(self.rg)
rg = self._create_rng().rg
pick = pickle.dumps(rg)
unpick = pickle.loads(pick)
assert_(type(self.rg) == type(unpick))
assert_(comp_state(self.rg.bit_generator.state,
assert_(type(rg) == type(unpick))
assert_(comp_state(rg.bit_generator.state,
unpick.bit_generator.state))
pick = pickle.dumps(self.rg)
pick = pickle.dumps(rg)
unpick = pickle.loads(pick)
assert_(type(self.rg) == type(unpick))
assert_(comp_state(self.rg.bit_generator.state,
assert_(type(rg) == type(unpick))
assert_(comp_state(rg.bit_generator.state,
unpick.bit_generator.state))
def test_seed_array(self):
if self.seed_vector_bits is None:
bitgen_name = self.bit_generator.__name__
data = self._create_rng()
if data.seed_vector_bits is None:
bitgen_name = data.bit_generator.__name__
pytest.skip(f'Vector seeding is not supported by {bitgen_name}')
if self.seed_vector_bits == 32:
if data.seed_vector_bits == 32:
dtype = np.uint32
else:
dtype = np.uint64
seed = np.array([1], dtype=dtype)
bg = self.bit_generator(seed)
bg = data.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(1)
bg = data.bit_generator(1)
state2 = bg.state
assert_(comp_state(state1, state2))
seed = np.arange(4, dtype=dtype)
bg = self.bit_generator(seed)
bg = data.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
bg = data.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
seed = np.arange(1500, dtype=dtype)
bg = self.bit_generator(seed)
bg = data.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
bg = data.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
seed = 2 ** np.mod(np.arange(1500, dtype=dtype),
self.seed_vector_bits - 1) + 1
bg = self.bit_generator(seed)
data.seed_vector_bits - 1) + 1
bg = data.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
bg = data.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
def test_uniform_float(self):
rg = Generator(self.bit_generator(12345))
bit_generator = self._create_rng().bit_generator
rg = Generator(bit_generator(12345))
warmup(rg)
state = rg.bit_generator.state
r1 = rg.random(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
rg2 = Generator(bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.random(11, dtype=np.float32)
@@ -497,11 +550,12 @@ class RNG:
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_gamma_floats(self):
rg = Generator(self.bit_generator())
bit_generator = self._create_rng().bit_generator
rg = Generator(bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_gamma(4.0, 11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
rg2 = Generator(bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32)
@@ -510,11 +564,12 @@ class RNG:
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_normal_floats(self):
rg = Generator(self.bit_generator())
bit_generator = self._create_rng().bit_generator
rg = Generator(bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_normal(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
rg2 = Generator(bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_normal(11, dtype=np.float32)
@@ -523,11 +578,12 @@ class RNG:
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_normal_zig_floats(self):
rg = Generator(self.bit_generator())
bit_generator = self._create_rng().bit_generator
rg = Generator(bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_normal(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
rg2 = Generator(bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_normal(11, dtype=np.float32)
@@ -536,7 +592,7 @@ class RNG:
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_output_fill(self):
rg = self.rg
rg = self._create_rng().rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
@@ -558,7 +614,7 @@ class RNG:
assert_equal(direct, existing)
def test_output_filling_uniform(self):
rg = self.rg
rg = self._create_rng().rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
@@ -576,7 +632,7 @@ class RNG:
assert_equal(direct, existing)
def test_output_filling_exponential(self):
rg = self.rg
rg = self._create_rng().rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
@@ -594,7 +650,7 @@ class RNG:
assert_equal(direct, existing)
def test_output_filling_gamma(self):
rg = self.rg
rg = self._create_rng().rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.zeros(size)
@@ -612,7 +668,7 @@ class RNG:
assert_equal(direct, existing)
def test_output_filling_gamma_broadcast(self):
rg = self.rg
rg = self._create_rng().rg
state = rg.bit_generator.state
size = (31, 7, 97)
mu = np.arange(97.0) + 1.0
@@ -631,7 +687,7 @@ class RNG:
assert_equal(direct, existing)
def test_output_fill_error(self):
rg = self.rg
rg = self._create_rng().rg
size = (31, 7, 97)
existing = np.empty(size)
with pytest.raises(TypeError):
@@ -653,7 +709,14 @@ class RNG:
with pytest.raises(ValueError):
rg.standard_gamma(1.0, out=existing[::3])
@pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT)
def test_integers_broadcast(self, dtype):
rg = self._create_rng().rg
initial_state = rg.bit_generator.state
def reset_state(rng):
rng.bit_generator.state = initial_state
if dtype == np.bool:
upper = 2
lower = 0
@@ -661,45 +724,50 @@ class RNG:
info = np.iinfo(dtype)
upper = int(info.max) + 1
lower = info.min
self._reset_state()
a = self.rg.integers(lower, [upper] * 10, dtype=dtype)
self._reset_state()
b = self.rg.integers([lower] * 10, upper, dtype=dtype)
reset_state(rg)
a = rg.integers(lower, [upper] * 10, dtype=dtype)
reset_state(rg)
b = rg.integers([lower] * 10, upper, dtype=dtype)
assert_equal(a, b)
self._reset_state()
c = self.rg.integers(lower, upper, size=10, dtype=dtype)
reset_state(rg)
c = rg.integers(lower, upper, size=10, dtype=dtype)
assert_equal(a, c)
self._reset_state()
d = self.rg.integers(np.array(
reset_state(rg)
d = rg.integers(np.array(
[lower] * 10), np.array([upper], dtype=object), size=10,
dtype=dtype)
assert_equal(a, d)
self._reset_state()
e = self.rg.integers(
reset_state(rg)
e = rg.integers(
np.array([lower] * 10), np.array([upper] * 10), size=10,
dtype=dtype)
assert_equal(a, e)
self._reset_state()
a = self.rg.integers(0, upper, size=10, dtype=dtype)
self._reset_state()
b = self.rg.integers([upper] * 10, dtype=dtype)
reset_state(rg)
a = rg.integers(0, upper, size=10, dtype=dtype)
reset_state(rg)
b = rg.integers([upper] * 10, dtype=dtype)
assert_equal(a, b)
@pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT)
def test_integers_numpy(self, dtype):
rg = self._create_rng().rg
high = np.array([1])
low = np.array([0])
out = self.rg.integers(low, high, dtype=dtype)
out = rg.integers(low, high, dtype=dtype)
assert out.shape == (1,)
out = self.rg.integers(low[0], high, dtype=dtype)
out = rg.integers(low[0], high, dtype=dtype)
assert out.shape == (1,)
out = self.rg.integers(low, high[0], dtype=dtype)
out = rg.integers(low, high[0], dtype=dtype)
assert out.shape == (1,)
@pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT)
def test_integers_broadcast_errors(self, dtype):
rg = self._create_rng().rg
if dtype == np.bool:
upper = 2
lower = 0
@@ -708,102 +776,97 @@ class RNG:
upper = int(info.max) + 1
lower = info.min
with pytest.raises(ValueError):
self.rg.integers(lower, [upper + 1] * 10, dtype=dtype)
rg.integers(lower, [upper + 1] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers(lower - 1, [upper] * 10, dtype=dtype)
rg.integers(lower - 1, [upper] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers([lower - 1], [upper] * 10, dtype=dtype)
rg.integers([lower - 1], [upper] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers([0], [0], dtype=dtype)
rg.integers([0], [0], dtype=dtype)
class TestMT19937(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = MT19937
cls.advance = None
cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 32
cls._extra_setup()
cls.seed_error = ValueError
def _create_rng(cls):
bit_generator = MT19937
advance = None
seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]
rg = Generator(bit_generator(*seed))
seed_vector_bits = 32
return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
def test_numpy_state(self):
rg = self._create_rng().rg
nprg = np.random.RandomState()
nprg.standard_normal(99)
state = nprg.get_state()
self.rg.bit_generator.state = state
state2 = self.rg.bit_generator.state
rg.bit_generator.state = state
state2 = rg.bit_generator.state
assert_((state[1] == state2['state']['key']).all())
assert_(state[2] == state2['state']['pos'])
class TestPhilox(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = Philox
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
def _create_rng(cls):
bit_generator = Philox
advance = 2**63 + 2**31 + 2**15 + 1
seed = [12345]
rg = Generator(bit_generator(*seed))
seed_vector_bits = 64
return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
class TestSFC64(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = SFC64
cls.advance = None
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 192
cls._extra_setup()
def _create_rng(cls):
bit_generator = SFC64
advance = None
seed = [12345]
rg = Generator(bit_generator(*seed))
seed_vector_bits = 192
return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
class TestPCG64(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
def _create_rng(cls):
bit_generator = PCG64
advance = 2**63 + 2**31 + 2**15 + 1
seed = [12345]
rg = Generator(bit_generator(*seed))
seed_vector_bits = 64
return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
class TestPCG64DXSM(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64DXSM
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
def _create_rng(cls):
bit_generator = PCG64DXSM
advance = 2**63 + 2**31 + 2**15 + 1
seed = [12345]
rg = Generator(bit_generator(*seed))
seed_vector_bits = 64
return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
class TestDefaultRNG(RNG):
@classmethod
def setup_class(cls):
def _create_rng(cls):
# This will duplicate some tests that directly instantiate a fresh
# Generator(), but that's okay.
cls.bit_generator = PCG64
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = np.random.default_rng(*cls.seed)
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
bit_generator = PCG64
advance = 2**63 + 2**31 + 2**15 + 1
seed = [12345]
rg = np.random.default_rng(*seed)
seed_vector_bits = 64
return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
def test_default_is_pcg64(self):
# In order to change the default BitGenerator, we'll go through
# a deprecation cycle to move to a different function.
assert_(isinstance(self.rg.bit_generator, PCG64))
rg = self._create_rng().rg
assert_(isinstance(rg.bit_generator, PCG64))
def test_seed(self):
np.random.default_rng()