增加环绕侦察场景适配
This commit is contained in:
@@ -41,7 +41,7 @@ CONFIG = _cleanup(
|
||||
"cython": {
|
||||
"name": "cython",
|
||||
"linker": r"cython",
|
||||
"version": "3.1.4",
|
||||
"version": "3.2.3",
|
||||
"commands": r"cython",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
@@ -93,7 +93,7 @@ CONFIG = _cleanup(
|
||||
},
|
||||
},
|
||||
"Python Information": {
|
||||
"path": r"/tmp/build-env-xnu9j23j/bin/python",
|
||||
"path": r"/tmp/build-env-qfl135gh/bin/python",
|
||||
"version": "3.13",
|
||||
},
|
||||
"SIMD Extensions": {
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
from enum import Enum
|
||||
from types import ModuleType
|
||||
from typing import Final, NotRequired, TypedDict, overload, type_check_only
|
||||
from typing import Literal as L
|
||||
from typing import (
|
||||
Final,
|
||||
Literal as L,
|
||||
NotRequired,
|
||||
TypedDict,
|
||||
overload,
|
||||
type_check_only,
|
||||
)
|
||||
|
||||
_CompilerConfigDictValue = TypedDict(
|
||||
"_CompilerConfigDictValue",
|
||||
|
||||
@@ -156,6 +156,7 @@ cdef extern from "numpy/arrayobject.h":
|
||||
NPY_SAFE_CASTING
|
||||
NPY_SAME_KIND_CASTING
|
||||
NPY_UNSAFE_CASTING
|
||||
NPY_SAME_VALUE_CASTING
|
||||
|
||||
ctypedef enum NPY_CLIPMODE:
|
||||
NPY_CLIP
|
||||
|
||||
@@ -165,6 +165,7 @@ cdef extern from "numpy/arrayobject.h":
|
||||
NPY_SAFE_CASTING
|
||||
NPY_SAME_KIND_CASTING
|
||||
NPY_UNSAFE_CASTING
|
||||
NPY_SAME_VALUE_CASTING
|
||||
|
||||
ctypedef enum NPY_CLIPMODE:
|
||||
NPY_CLIP
|
||||
|
||||
@@ -111,10 +111,13 @@ else:
|
||||
try:
|
||||
from numpy.__config__ import show_config
|
||||
except ImportError as e:
|
||||
msg = """Error importing numpy: you should not try to import numpy from
|
||||
its source directory; please exit the numpy source tree, and relaunch
|
||||
your python interpreter from there."""
|
||||
raise ImportError(msg) from e
|
||||
if isinstance(e, ModuleNotFoundError) and e.name == "numpy.__config__":
|
||||
# The __config__ module itself was not found, so add this info:
|
||||
msg = """Error importing numpy: you should not try to import numpy from
|
||||
its source directory; please exit the numpy source tree, and relaunch
|
||||
your python interpreter from there."""
|
||||
raise ImportError(msg) from e
|
||||
raise
|
||||
|
||||
from . import _core
|
||||
from ._core import (
|
||||
@@ -451,13 +454,11 @@ else:
|
||||
pass
|
||||
del ta
|
||||
|
||||
from . import lib
|
||||
from . import matrixlib as _mat
|
||||
from . import lib, matrixlib as _mat
|
||||
from .lib import scimath as emath
|
||||
from .lib._arraypad_impl import pad
|
||||
from .lib._arraysetops_impl import (
|
||||
ediff1d,
|
||||
in1d,
|
||||
intersect1d,
|
||||
isin,
|
||||
setdiff1d,
|
||||
@@ -504,7 +505,6 @@ else:
|
||||
sinc,
|
||||
sort_complex,
|
||||
trapezoid,
|
||||
trapz,
|
||||
trim_zeros,
|
||||
unwrap,
|
||||
vectorize,
|
||||
@@ -675,9 +675,6 @@ else:
|
||||
|
||||
from ._array_api_info import __array_namespace_info__
|
||||
|
||||
# now that numpy core module is imported, can initialize limits
|
||||
_core.getlimits._register_known_types()
|
||||
|
||||
__all__ = list(
|
||||
__numpy_submodules__ |
|
||||
set(_core.__all__) |
|
||||
@@ -873,6 +870,23 @@ else:
|
||||
del w
|
||||
del _mac_os_check
|
||||
|
||||
def blas_fpe_check():
|
||||
# Check if BLAS adds spurious FPEs, mostly seen on M4 arms with Accelerate.
|
||||
with errstate(all='raise'):
|
||||
x = ones((20, 20))
|
||||
try:
|
||||
x @ x
|
||||
except FloatingPointError:
|
||||
res = _core._multiarray_umath._blas_supports_fpe(False)
|
||||
if res: # res was not modified (hardcoded to True for now)
|
||||
warnings.warn(
|
||||
"Spurious warnings given by blas but suppression not "
|
||||
"set up on this platform. Please open a NumPy issue.",
|
||||
UserWarning, stacklevel=2)
|
||||
|
||||
blas_fpe_check()
|
||||
del blas_fpe_check
|
||||
|
||||
def hugepage_setup():
|
||||
"""
|
||||
We usually use madvise hugepages support, but on some old kernels it
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,5 +1,4 @@
|
||||
from typing import (
|
||||
ClassVar,
|
||||
Literal,
|
||||
Never,
|
||||
TypeAlias,
|
||||
@@ -118,14 +117,14 @@ _EmptyDict: TypeAlias = dict[Never, Never]
|
||||
|
||||
@final
|
||||
class __array_namespace_info__:
|
||||
__module__: ClassVar[Literal['numpy']]
|
||||
__module__: Literal["numpy"] = "numpy"
|
||||
|
||||
def capabilities(self) -> _Capabilities: ...
|
||||
def default_device(self) -> _Device: ...
|
||||
def default_dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
device: _DeviceLike = None,
|
||||
) -> _DefaultDTypes: ...
|
||||
def devices(self) -> list[_Device]: ...
|
||||
|
||||
@@ -133,49 +132,49 @@ class __array_namespace_info__:
|
||||
def dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
kind: None = ...,
|
||||
device: _DeviceLike = None,
|
||||
kind: None = None,
|
||||
) -> _DTypes: ...
|
||||
@overload
|
||||
def dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
device: _DeviceLike = None,
|
||||
kind: _Permute1[_KindBool],
|
||||
) -> _DTypesBool: ...
|
||||
@overload
|
||||
def dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
device: _DeviceLike = None,
|
||||
kind: _Permute1[_KindInt],
|
||||
) -> _DTypesInt: ...
|
||||
@overload
|
||||
def dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
device: _DeviceLike = None,
|
||||
kind: _Permute1[_KindUInt],
|
||||
) -> _DTypesUInt: ...
|
||||
@overload
|
||||
def dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
device: _DeviceLike = None,
|
||||
kind: _Permute1[_KindFloat],
|
||||
) -> _DTypesFloat: ...
|
||||
@overload
|
||||
def dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
device: _DeviceLike = None,
|
||||
kind: _Permute1[_KindComplex],
|
||||
) -> _DTypesComplex: ...
|
||||
@overload
|
||||
def dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
device: _DeviceLike = None,
|
||||
kind: (
|
||||
_Permute1[_KindInteger]
|
||||
| _Permute2[_KindInt, _KindUInt]
|
||||
@@ -185,7 +184,7 @@ class __array_namespace_info__:
|
||||
def dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
device: _DeviceLike = None,
|
||||
kind: (
|
||||
_Permute1[_KindNumber]
|
||||
| _Permute3[_KindInteger, _KindFloat, _KindComplex]
|
||||
@@ -195,13 +194,13 @@ class __array_namespace_info__:
|
||||
def dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
device: _DeviceLike = None,
|
||||
kind: tuple[()],
|
||||
) -> _EmptyDict: ...
|
||||
@overload
|
||||
def dtypes(
|
||||
self,
|
||||
*,
|
||||
device: _DeviceLike = ...,
|
||||
device: _DeviceLike = None,
|
||||
kind: tuple[_Kind, ...],
|
||||
) -> _DTypesUnion: ...
|
||||
|
||||
@@ -22,29 +22,64 @@ try:
|
||||
from . import multiarray
|
||||
except ImportError as exc:
|
||||
import sys
|
||||
msg = """
|
||||
|
||||
# Bypass for the module re-initialization opt-out
|
||||
if exc.msg == "cannot load module more than once per process":
|
||||
raise
|
||||
|
||||
# Basically always, the problem should be that the C module is wrong/missing...
|
||||
if (
|
||||
isinstance(exc, ModuleNotFoundError)
|
||||
and exc.name == "numpy._core._multiarray_umath"
|
||||
):
|
||||
import sys
|
||||
candidates = []
|
||||
for path in __path__:
|
||||
candidates.extend(
|
||||
f for f in os.listdir(path) if f.startswith("_multiarray_umath"))
|
||||
if len(candidates) == 0:
|
||||
bad_c_module_info = (
|
||||
"We found no compiled module, did NumPy build successfully?\n")
|
||||
else:
|
||||
candidate_str = '\n * '.join(candidates)
|
||||
# cache_tag is documented to be possibly None, so just use name if it is
|
||||
# this guesses at cache_tag being the same as the extension module scheme
|
||||
tag = sys.implementation.cache_tag or sys.implementation.name
|
||||
bad_c_module_info = (
|
||||
f"The following compiled module files exist, but seem incompatible\n"
|
||||
f"with with either python '{tag}' or the "
|
||||
f"platform '{sys.platform}':\n\n * {candidate_str}\n"
|
||||
)
|
||||
else:
|
||||
bad_c_module_info = ""
|
||||
|
||||
major, minor, *_ = sys.version_info
|
||||
msg = f"""
|
||||
|
||||
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
|
||||
|
||||
Importing the numpy C-extensions failed. This error can happen for
|
||||
many reasons, often due to issues with your setup or how NumPy was
|
||||
installed.
|
||||
|
||||
{bad_c_module_info}
|
||||
We have compiled some common reasons and troubleshooting tips at:
|
||||
|
||||
https://numpy.org/devdocs/user/troubleshooting-importerror.html
|
||||
|
||||
Please note and check the following:
|
||||
|
||||
* The Python version is: Python%d.%d from "%s"
|
||||
* The NumPy version is: "%s"
|
||||
* The Python version is: Python {major}.{minor} from "{sys.executable}"
|
||||
* The NumPy version is: "{__version__}"
|
||||
|
||||
and make sure that they are the versions you expect.
|
||||
Please carefully study the documentation linked above for further help.
|
||||
|
||||
Original error was: %s
|
||||
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
|
||||
__version__, exc)
|
||||
Please carefully study the information and documentation linked above.
|
||||
This is unlikely to be a NumPy issue but will be caused by a bad install
|
||||
or environment on your machine.
|
||||
|
||||
Original error was: {exc}
|
||||
"""
|
||||
|
||||
raise ImportError(msg) from exc
|
||||
finally:
|
||||
for envkey in env_added:
|
||||
@@ -71,15 +106,7 @@ from . import numerictypes as nt
|
||||
from .numerictypes import sctypeDict, sctypes
|
||||
|
||||
multiarray.set_typeDict(nt.sctypeDict)
|
||||
from . import (
|
||||
_machar,
|
||||
einsumfunc,
|
||||
fromnumeric,
|
||||
function_base,
|
||||
getlimits,
|
||||
numeric,
|
||||
shape_base,
|
||||
)
|
||||
from . import einsumfunc, fromnumeric, function_base, getlimits, numeric, shape_base
|
||||
from .einsumfunc import *
|
||||
from .fromnumeric import *
|
||||
from .function_base import *
|
||||
@@ -160,18 +187,6 @@ def _DType_reduce(DType):
|
||||
return _DType_reconstruct, (scalar_type,)
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
# Deprecated 2022-11-22, NumPy 1.25.
|
||||
if name == "MachAr":
|
||||
import warnings
|
||||
warnings.warn(
|
||||
"The `np._core.MachAr` is considered private API (NumPy 1.24)",
|
||||
DeprecationWarning, stacklevel=2,
|
||||
)
|
||||
return _machar.MachAr
|
||||
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
|
||||
|
||||
|
||||
import copyreg
|
||||
|
||||
copyreg.pickle(ufunc, _ufunc_reduce)
|
||||
|
||||
@@ -1,2 +1,666 @@
|
||||
# NOTE: The `np._core` namespace is deliberately kept empty due to it
|
||||
# being private
|
||||
# keep in sync with https://github.com/numpy/numtype/blob/main/src/numpy-stubs/_core/__init__.pyi
|
||||
|
||||
from ._asarray import require
|
||||
from ._ufunc_config import (
|
||||
errstate,
|
||||
getbufsize,
|
||||
geterr,
|
||||
geterrcall,
|
||||
setbufsize,
|
||||
seterr,
|
||||
seterrcall,
|
||||
)
|
||||
from .arrayprint import (
|
||||
array2string,
|
||||
array_repr,
|
||||
array_str,
|
||||
format_float_positional,
|
||||
format_float_scientific,
|
||||
get_printoptions,
|
||||
printoptions,
|
||||
set_printoptions,
|
||||
)
|
||||
from .einsumfunc import einsum, einsum_path
|
||||
from .fromnumeric import (
|
||||
all,
|
||||
amax,
|
||||
amin,
|
||||
any,
|
||||
argmax,
|
||||
argmin,
|
||||
argpartition,
|
||||
argsort,
|
||||
around,
|
||||
choose,
|
||||
clip,
|
||||
compress,
|
||||
cumprod,
|
||||
cumsum,
|
||||
cumulative_prod,
|
||||
cumulative_sum,
|
||||
diagonal,
|
||||
matrix_transpose,
|
||||
max,
|
||||
mean,
|
||||
min,
|
||||
ndim,
|
||||
nonzero,
|
||||
partition,
|
||||
prod,
|
||||
ptp,
|
||||
put,
|
||||
ravel,
|
||||
repeat,
|
||||
reshape,
|
||||
resize,
|
||||
round,
|
||||
searchsorted,
|
||||
shape,
|
||||
size,
|
||||
sort,
|
||||
squeeze,
|
||||
std,
|
||||
sum,
|
||||
swapaxes,
|
||||
take,
|
||||
trace,
|
||||
transpose,
|
||||
transpose as permute_dims,
|
||||
var,
|
||||
)
|
||||
from .function_base import geomspace, linspace, logspace
|
||||
from .getlimits import finfo, iinfo
|
||||
from .memmap import memmap
|
||||
from .numeric import (
|
||||
False_,
|
||||
True_,
|
||||
allclose,
|
||||
arange,
|
||||
argwhere,
|
||||
array,
|
||||
array_equal,
|
||||
array_equiv,
|
||||
asanyarray,
|
||||
asarray,
|
||||
ascontiguousarray,
|
||||
asfortranarray,
|
||||
astype,
|
||||
base_repr,
|
||||
binary_repr,
|
||||
bitwise_not,
|
||||
broadcast,
|
||||
can_cast,
|
||||
concatenate,
|
||||
concatenate as concat,
|
||||
convolve,
|
||||
copyto,
|
||||
correlate,
|
||||
count_nonzero,
|
||||
cross,
|
||||
dot,
|
||||
dtype,
|
||||
empty,
|
||||
empty_like,
|
||||
flatiter,
|
||||
flatnonzero,
|
||||
from_dlpack,
|
||||
frombuffer,
|
||||
fromfile,
|
||||
fromfunction,
|
||||
fromiter,
|
||||
fromstring,
|
||||
full,
|
||||
full_like,
|
||||
identity,
|
||||
indices,
|
||||
inf,
|
||||
inner,
|
||||
isclose,
|
||||
isfortran,
|
||||
isscalar,
|
||||
lexsort,
|
||||
little_endian,
|
||||
matmul,
|
||||
may_share_memory,
|
||||
min_scalar_type,
|
||||
moveaxis,
|
||||
nan,
|
||||
ndarray,
|
||||
nditer,
|
||||
nested_iters,
|
||||
newaxis,
|
||||
ones,
|
||||
ones_like,
|
||||
outer,
|
||||
promote_types,
|
||||
putmask,
|
||||
result_type,
|
||||
roll,
|
||||
rollaxis,
|
||||
shares_memory,
|
||||
tensordot,
|
||||
ufunc,
|
||||
vdot,
|
||||
vecdot,
|
||||
where,
|
||||
zeros,
|
||||
zeros_like,
|
||||
)
|
||||
from .numerictypes import (
|
||||
ScalarType,
|
||||
bool,
|
||||
bool_,
|
||||
busday_count,
|
||||
busday_offset,
|
||||
busdaycalendar,
|
||||
byte,
|
||||
bytes_,
|
||||
cdouble,
|
||||
character,
|
||||
clongdouble,
|
||||
complex64,
|
||||
complex128,
|
||||
complex192,
|
||||
complex256,
|
||||
complexfloating,
|
||||
csingle,
|
||||
datetime64,
|
||||
datetime_as_string,
|
||||
datetime_data,
|
||||
double,
|
||||
flexible,
|
||||
float16,
|
||||
float32,
|
||||
float64,
|
||||
float96,
|
||||
float128,
|
||||
floating,
|
||||
generic,
|
||||
half,
|
||||
inexact,
|
||||
int8,
|
||||
int16,
|
||||
int32,
|
||||
int64,
|
||||
int_,
|
||||
intc,
|
||||
integer,
|
||||
intp,
|
||||
is_busday,
|
||||
isdtype,
|
||||
issubdtype,
|
||||
long,
|
||||
longdouble,
|
||||
longlong,
|
||||
number,
|
||||
object_,
|
||||
sctypeDict,
|
||||
short,
|
||||
signedinteger,
|
||||
single,
|
||||
str_,
|
||||
timedelta64,
|
||||
typecodes,
|
||||
ubyte,
|
||||
uint,
|
||||
uint8,
|
||||
uint16,
|
||||
uint32,
|
||||
uint64,
|
||||
uintc,
|
||||
uintp,
|
||||
ulong,
|
||||
ulonglong,
|
||||
unsignedinteger,
|
||||
ushort,
|
||||
void,
|
||||
)
|
||||
from .records import recarray, record
|
||||
from .shape_base import (
|
||||
atleast_1d,
|
||||
atleast_2d,
|
||||
atleast_3d,
|
||||
block,
|
||||
hstack,
|
||||
stack,
|
||||
unstack,
|
||||
vstack,
|
||||
)
|
||||
from .umath import (
|
||||
absolute,
|
||||
absolute as abs,
|
||||
add,
|
||||
arccos,
|
||||
arccos as acos,
|
||||
arccosh,
|
||||
arccosh as acosh,
|
||||
arcsin,
|
||||
arcsin as asin,
|
||||
arcsinh,
|
||||
arcsinh as asinh,
|
||||
arctan,
|
||||
arctan as atan,
|
||||
arctan2,
|
||||
arctan2 as atan2,
|
||||
arctanh,
|
||||
arctanh as atanh,
|
||||
bitwise_and,
|
||||
bitwise_count,
|
||||
bitwise_or,
|
||||
bitwise_xor,
|
||||
cbrt,
|
||||
ceil,
|
||||
conj,
|
||||
conjugate,
|
||||
copysign,
|
||||
cos,
|
||||
cosh,
|
||||
deg2rad,
|
||||
degrees,
|
||||
divide,
|
||||
divmod,
|
||||
e,
|
||||
equal,
|
||||
euler_gamma,
|
||||
exp,
|
||||
exp2,
|
||||
expm1,
|
||||
fabs,
|
||||
float_power,
|
||||
floor,
|
||||
floor_divide,
|
||||
fmax,
|
||||
fmin,
|
||||
fmod,
|
||||
frexp,
|
||||
frompyfunc,
|
||||
gcd,
|
||||
greater,
|
||||
greater_equal,
|
||||
heaviside,
|
||||
hypot,
|
||||
invert,
|
||||
invert as bitwise_invert,
|
||||
isfinite,
|
||||
isinf,
|
||||
isnan,
|
||||
isnat,
|
||||
lcm,
|
||||
ldexp,
|
||||
left_shift,
|
||||
left_shift as bitwise_left_shift,
|
||||
less,
|
||||
less_equal,
|
||||
log,
|
||||
log1p,
|
||||
log2,
|
||||
log10,
|
||||
logaddexp,
|
||||
logaddexp2,
|
||||
logical_and,
|
||||
logical_not,
|
||||
logical_or,
|
||||
logical_xor,
|
||||
matvec,
|
||||
maximum,
|
||||
minimum,
|
||||
mod,
|
||||
modf,
|
||||
multiply,
|
||||
negative,
|
||||
nextafter,
|
||||
not_equal,
|
||||
pi,
|
||||
positive,
|
||||
power,
|
||||
power as pow,
|
||||
rad2deg,
|
||||
radians,
|
||||
reciprocal,
|
||||
remainder,
|
||||
right_shift,
|
||||
right_shift as bitwise_right_shift,
|
||||
rint,
|
||||
sign,
|
||||
signbit,
|
||||
sin,
|
||||
sinh,
|
||||
spacing,
|
||||
sqrt,
|
||||
square,
|
||||
subtract,
|
||||
tan,
|
||||
tanh,
|
||||
true_divide,
|
||||
trunc,
|
||||
vecmat,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"False_",
|
||||
"ScalarType",
|
||||
"True_",
|
||||
"abs",
|
||||
"absolute",
|
||||
"acos",
|
||||
"acosh",
|
||||
"add",
|
||||
"all",
|
||||
"allclose",
|
||||
"amax",
|
||||
"amin",
|
||||
"any",
|
||||
"arange",
|
||||
"arccos",
|
||||
"arccosh",
|
||||
"arcsin",
|
||||
"arcsinh",
|
||||
"arctan",
|
||||
"arctan2",
|
||||
"arctanh",
|
||||
"argmax",
|
||||
"argmin",
|
||||
"argpartition",
|
||||
"argsort",
|
||||
"argwhere",
|
||||
"around",
|
||||
"array",
|
||||
"array2string",
|
||||
"array_equal",
|
||||
"array_equiv",
|
||||
"array_repr",
|
||||
"array_str",
|
||||
"asanyarray",
|
||||
"asarray",
|
||||
"ascontiguousarray",
|
||||
"asfortranarray",
|
||||
"asin",
|
||||
"asinh",
|
||||
"astype",
|
||||
"atan",
|
||||
"atan2",
|
||||
"atanh",
|
||||
"atleast_1d",
|
||||
"atleast_2d",
|
||||
"atleast_3d",
|
||||
"base_repr",
|
||||
"binary_repr",
|
||||
"bitwise_and",
|
||||
"bitwise_count",
|
||||
"bitwise_invert",
|
||||
"bitwise_left_shift",
|
||||
"bitwise_not",
|
||||
"bitwise_or",
|
||||
"bitwise_right_shift",
|
||||
"bitwise_xor",
|
||||
"block",
|
||||
"bool",
|
||||
"bool_",
|
||||
"broadcast",
|
||||
"busday_count",
|
||||
"busday_offset",
|
||||
"busdaycalendar",
|
||||
"byte",
|
||||
"bytes_",
|
||||
"can_cast",
|
||||
"cbrt",
|
||||
"cdouble",
|
||||
"ceil",
|
||||
"character",
|
||||
"choose",
|
||||
"clip",
|
||||
"clongdouble",
|
||||
"complex64",
|
||||
"complex128",
|
||||
"complex192",
|
||||
"complex256",
|
||||
"complexfloating",
|
||||
"compress",
|
||||
"concat",
|
||||
"concatenate",
|
||||
"conj",
|
||||
"conjugate",
|
||||
"convolve",
|
||||
"copysign",
|
||||
"copyto",
|
||||
"correlate",
|
||||
"cos",
|
||||
"cosh",
|
||||
"count_nonzero",
|
||||
"cross",
|
||||
"csingle",
|
||||
"cumprod",
|
||||
"cumsum",
|
||||
"cumulative_prod",
|
||||
"cumulative_sum",
|
||||
"datetime64",
|
||||
"datetime_as_string",
|
||||
"datetime_data",
|
||||
"deg2rad",
|
||||
"degrees",
|
||||
"diagonal",
|
||||
"divide",
|
||||
"divmod",
|
||||
"dot",
|
||||
"double",
|
||||
"dtype",
|
||||
"e",
|
||||
"einsum",
|
||||
"einsum_path",
|
||||
"empty",
|
||||
"empty_like",
|
||||
"equal",
|
||||
"errstate",
|
||||
"euler_gamma",
|
||||
"exp",
|
||||
"exp2",
|
||||
"expm1",
|
||||
"fabs",
|
||||
"finfo",
|
||||
"flatiter",
|
||||
"flatnonzero",
|
||||
"flexible",
|
||||
"float16",
|
||||
"float32",
|
||||
"float64",
|
||||
"float96",
|
||||
"float128",
|
||||
"float_power",
|
||||
"floating",
|
||||
"floor",
|
||||
"floor_divide",
|
||||
"fmax",
|
||||
"fmin",
|
||||
"fmod",
|
||||
"format_float_positional",
|
||||
"format_float_scientific",
|
||||
"frexp",
|
||||
"from_dlpack",
|
||||
"frombuffer",
|
||||
"fromfile",
|
||||
"fromfunction",
|
||||
"fromiter",
|
||||
"frompyfunc",
|
||||
"fromstring",
|
||||
"full",
|
||||
"full_like",
|
||||
"gcd",
|
||||
"generic",
|
||||
"geomspace",
|
||||
"get_printoptions",
|
||||
"getbufsize",
|
||||
"geterr",
|
||||
"geterrcall",
|
||||
"greater",
|
||||
"greater_equal",
|
||||
"half",
|
||||
"heaviside",
|
||||
"hstack",
|
||||
"hypot",
|
||||
"identity",
|
||||
"iinfo",
|
||||
"indices",
|
||||
"inexact",
|
||||
"inf",
|
||||
"inner",
|
||||
"int8",
|
||||
"int16",
|
||||
"int32",
|
||||
"int64",
|
||||
"int_",
|
||||
"intc",
|
||||
"integer",
|
||||
"intp",
|
||||
"invert",
|
||||
"is_busday",
|
||||
"isclose",
|
||||
"isdtype",
|
||||
"isfinite",
|
||||
"isfortran",
|
||||
"isinf",
|
||||
"isnan",
|
||||
"isnat",
|
||||
"isscalar",
|
||||
"issubdtype",
|
||||
"lcm",
|
||||
"ldexp",
|
||||
"left_shift",
|
||||
"less",
|
||||
"less_equal",
|
||||
"lexsort",
|
||||
"linspace",
|
||||
"little_endian",
|
||||
"log",
|
||||
"log1p",
|
||||
"log2",
|
||||
"log10",
|
||||
"logaddexp",
|
||||
"logaddexp2",
|
||||
"logical_and",
|
||||
"logical_not",
|
||||
"logical_or",
|
||||
"logical_xor",
|
||||
"logspace",
|
||||
"long",
|
||||
"longdouble",
|
||||
"longlong",
|
||||
"matmul",
|
||||
"matrix_transpose",
|
||||
"matvec",
|
||||
"max",
|
||||
"maximum",
|
||||
"may_share_memory",
|
||||
"mean",
|
||||
"memmap",
|
||||
"min",
|
||||
"min_scalar_type",
|
||||
"minimum",
|
||||
"mod",
|
||||
"modf",
|
||||
"moveaxis",
|
||||
"multiply",
|
||||
"nan",
|
||||
"ndarray",
|
||||
"ndim",
|
||||
"nditer",
|
||||
"negative",
|
||||
"nested_iters",
|
||||
"newaxis",
|
||||
"nextafter",
|
||||
"nonzero",
|
||||
"not_equal",
|
||||
"number",
|
||||
"object_",
|
||||
"ones",
|
||||
"ones_like",
|
||||
"outer",
|
||||
"partition",
|
||||
"permute_dims",
|
||||
"pi",
|
||||
"positive",
|
||||
"pow",
|
||||
"power",
|
||||
"printoptions",
|
||||
"prod",
|
||||
"promote_types",
|
||||
"ptp",
|
||||
"put",
|
||||
"putmask",
|
||||
"rad2deg",
|
||||
"radians",
|
||||
"ravel",
|
||||
"recarray",
|
||||
"reciprocal",
|
||||
"record",
|
||||
"remainder",
|
||||
"repeat",
|
||||
"require",
|
||||
"reshape",
|
||||
"resize",
|
||||
"result_type",
|
||||
"right_shift",
|
||||
"rint",
|
||||
"roll",
|
||||
"rollaxis",
|
||||
"round",
|
||||
"sctypeDict",
|
||||
"searchsorted",
|
||||
"set_printoptions",
|
||||
"setbufsize",
|
||||
"seterr",
|
||||
"seterrcall",
|
||||
"shape",
|
||||
"shares_memory",
|
||||
"short",
|
||||
"sign",
|
||||
"signbit",
|
||||
"signedinteger",
|
||||
"sin",
|
||||
"single",
|
||||
"sinh",
|
||||
"size",
|
||||
"sort",
|
||||
"spacing",
|
||||
"sqrt",
|
||||
"square",
|
||||
"squeeze",
|
||||
"stack",
|
||||
"std",
|
||||
"str_",
|
||||
"subtract",
|
||||
"sum",
|
||||
"swapaxes",
|
||||
"take",
|
||||
"tan",
|
||||
"tanh",
|
||||
"tensordot",
|
||||
"timedelta64",
|
||||
"trace",
|
||||
"transpose",
|
||||
"true_divide",
|
||||
"trunc",
|
||||
"typecodes",
|
||||
"ubyte",
|
||||
"ufunc",
|
||||
"uint",
|
||||
"uint8",
|
||||
"uint16",
|
||||
"uint32",
|
||||
"uint64",
|
||||
"uintc",
|
||||
"uintp",
|
||||
"ulong",
|
||||
"ulonglong",
|
||||
"unsignedinteger",
|
||||
"unstack",
|
||||
"ushort",
|
||||
"var",
|
||||
"vdot",
|
||||
"vecdot",
|
||||
"vecmat",
|
||||
"void",
|
||||
"vstack",
|
||||
"where",
|
||||
"zeros",
|
||||
"zeros_like",
|
||||
]
|
||||
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
Load Diff
@@ -1,3 +1,2 @@
|
||||
from .function_base import add_newdoc as add_newdoc
|
||||
from .overrides import get_array_function_like_doc as get_array_function_like_doc
|
||||
|
||||
def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ...
|
||||
|
||||
@@ -6,8 +6,7 @@ platform-dependent information.
|
||||
import os
|
||||
import sys
|
||||
|
||||
from numpy._core import dtype
|
||||
from numpy._core import numerictypes as _numerictypes
|
||||
from numpy._core import dtype, numerictypes as _numerictypes
|
||||
from numpy._core.function_base import add_newdoc
|
||||
|
||||
##############################################################################
|
||||
@@ -49,7 +48,7 @@ possible_aliases = numeric_type_aliases([
|
||||
('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
|
||||
('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
|
||||
('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
def _get_platform_and_machine():
|
||||
@@ -68,258 +67,240 @@ def _get_platform_and_machine():
|
||||
_system, _machine = _get_platform_and_machine()
|
||||
_doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
|
||||
|
||||
# docstring prefix that cpython uses to populate `__text_signature__`
|
||||
_ARGUMENT_CLINIC_TEMPLATE = """{name}{signature}
|
||||
--
|
||||
|
||||
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
|
||||
{docstring}"""
|
||||
|
||||
def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None:
|
||||
# note: `:field: value` is rST syntax which renders as field lists.
|
||||
o = getattr(_numerictypes, obj)
|
||||
cls = getattr(_numerictypes, name)
|
||||
module = cls.__module__
|
||||
|
||||
character_code = dtype(o).char
|
||||
canonical_name_doc = "" if obj == o.__name__ else \
|
||||
f":Canonical name: `numpy.{obj}`\n "
|
||||
if fixed_aliases:
|
||||
alias_doc = ''.join(f":Alias: `numpy.{alias}`\n "
|
||||
for alias in fixed_aliases)
|
||||
else:
|
||||
alias_doc = ''
|
||||
alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n "
|
||||
for (alias_type, alias, doc) in possible_aliases if alias_type is o)
|
||||
lines_extra = [
|
||||
"", # blank line after main doc
|
||||
f":Character code: ``{dtype(cls).char!r}``",
|
||||
]
|
||||
|
||||
docstring = f"""
|
||||
{doc.strip()}
|
||||
if name != cls.__name__:
|
||||
lines_extra.append(f":Canonical name: `{module}.{name}`")
|
||||
|
||||
:Character code: ``'{character_code}'``
|
||||
{canonical_name_doc}{alias_doc}
|
||||
"""
|
||||
lines_extra.extend(
|
||||
f"{_doc_alias_string} `{module}.{alias}`: {doc}."
|
||||
for alias_type, alias, doc in possible_aliases
|
||||
if alias_type is cls
|
||||
)
|
||||
|
||||
add_newdoc('numpy._core.numerictypes', obj, docstring)
|
||||
docstring = _ARGUMENT_CLINIC_TEMPLATE.format(
|
||||
name=cls.__name__, # must match the class name
|
||||
signature=text_signature,
|
||||
docstring="\n".join([doc.strip(), *lines_extra]),
|
||||
)
|
||||
add_newdoc('numpy._core.numerictypes', name, docstring)
|
||||
|
||||
|
||||
_bool_docstring = (
|
||||
"""
|
||||
Boolean type (True or False), stored as a byte.
|
||||
for bool_name in ('bool', 'bool_'):
|
||||
add_newdoc_for_scalar_type(bool_name, '(value=False, /)', """
|
||||
Boolean type (True or False), stored as a byte.
|
||||
|
||||
.. warning::
|
||||
.. warning::
|
||||
|
||||
The :class:`bool` type is not a subclass of the :class:`int_` type
|
||||
(the :class:`bool` is not even a number type). This is different
|
||||
than Python's default implementation of :class:`bool` as a
|
||||
sub-class of :class:`int`.
|
||||
"""
|
||||
)
|
||||
The :class:`bool` type is not a subclass of the :class:`int_` type
|
||||
(the :class:`bool` is not even a number type). This is different
|
||||
than Python's default implementation of :class:`bool` as a
|
||||
sub-class of :class:`int`.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('bool', [], _bool_docstring)
|
||||
add_newdoc_for_scalar_type('byte', '(value=0, /)', """
|
||||
Signed integer type, compatible with C ``char``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('bool_', [], _bool_docstring)
|
||||
add_newdoc_for_scalar_type('short', '(value=0, /)', """
|
||||
Signed integer type, compatible with C ``short``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('byte', [],
|
||||
"""
|
||||
Signed integer type, compatible with C ``char``.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('intc', '(value=0, /)', """
|
||||
Signed integer type, compatible with C ``int``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('short', [],
|
||||
"""
|
||||
Signed integer type, compatible with C ``short``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('intc', [],
|
||||
"""
|
||||
Signed integer type, compatible with C ``int``.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('long', '(value=0, /)', """
|
||||
Signed integer type, compatible with C ``long``.
|
||||
""")
|
||||
|
||||
# TODO: These docs probably need an if to highlight the default rather than
|
||||
# the C-types (and be correct).
|
||||
add_newdoc_for_scalar_type('int_', [],
|
||||
"""
|
||||
Default signed integer type, 64bit on 64bit systems and 32bit on 32bit
|
||||
systems.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('int_', '(value=0, /)', """
|
||||
Default signed integer type, 64bit on 64bit systems and 32bit on 32bit systems.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('longlong', [],
|
||||
"""
|
||||
Signed integer type, compatible with C ``long long``.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('longlong', '(value=0, /)', """
|
||||
Signed integer type, compatible with C ``long long``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('ubyte', [],
|
||||
"""
|
||||
Unsigned integer type, compatible with C ``unsigned char``.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('ubyte', '(value=0, /)', """
|
||||
Unsigned integer type, compatible with C ``unsigned char``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('ushort', [],
|
||||
"""
|
||||
Unsigned integer type, compatible with C ``unsigned short``.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('ushort', '(value=0, /)', """
|
||||
Unsigned integer type, compatible with C ``unsigned short``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('uintc', [],
|
||||
"""
|
||||
Unsigned integer type, compatible with C ``unsigned int``.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('uintc', '(value=0, /)', """
|
||||
Unsigned integer type, compatible with C ``unsigned int``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('uint', [],
|
||||
"""
|
||||
Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit
|
||||
systems.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('uint', '(value=0, /)', """
|
||||
Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit systems.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('ulonglong', [],
|
||||
"""
|
||||
Signed integer type, compatible with C ``unsigned long long``.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('ulong', '(value=0, /)', """
|
||||
Unsigned integer type, compatible with C ``unsigned long``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('half', [],
|
||||
"""
|
||||
Half-precision floating-point number type.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('ulonglong', '(value=0, /)', """
|
||||
Unsigned integer type, compatible with C ``unsigned long long``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('single', [],
|
||||
"""
|
||||
Single-precision floating-point number type, compatible with C ``float``.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('half', '(value=0, /)', """
|
||||
Half-precision floating-point number type.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('double', [],
|
||||
"""
|
||||
Double-precision floating-point number type, compatible with Python
|
||||
:class:`float` and C ``double``.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('single', '(value=0, /)', """
|
||||
Single-precision floating-point number type, compatible with C ``float``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('longdouble', [],
|
||||
"""
|
||||
Extended-precision floating-point number type, compatible with C
|
||||
``long double`` but not necessarily with IEEE 754 quadruple-precision.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('double', '(value=0, /)', """
|
||||
Double-precision floating-point number type, compatible with Python :class:`float` and C ``double``.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('csingle', [],
|
||||
"""
|
||||
Complex number type composed of two single-precision floating-point
|
||||
numbers.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('longdouble', '(value=0, /)', """
|
||||
Extended-precision floating-point number type, compatible with C ``long double``
|
||||
but not necessarily with IEEE 754 quadruple-precision.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('cdouble', [],
|
||||
"""
|
||||
Complex number type composed of two double-precision floating-point
|
||||
numbers, compatible with Python :class:`complex`.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('csingle', '(real=0, imag=0, /)', """
|
||||
Complex number type composed of two single-precision floating-point numbers.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('clongdouble', [],
|
||||
"""
|
||||
Complex number type composed of two extended-precision floating-point
|
||||
numbers.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('cdouble', '(real=0, imag=0, /)', """
|
||||
Complex number type composed of two double-precision floating-point numbers,
|
||||
compatible with Python :class:`complex`.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('object_', [],
|
||||
"""
|
||||
Any Python object.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('clongdouble', '(real=0, imag=0, /)', """
|
||||
Complex number type composed of two extended-precision floating-point numbers.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('str_', [],
|
||||
r"""
|
||||
A unicode string.
|
||||
add_newdoc_for_scalar_type('object_', '(value=None, /)', """
|
||||
Any Python object.
|
||||
""")
|
||||
|
||||
This type strips trailing null codepoints.
|
||||
add_newdoc_for_scalar_type('str_', '(value="", /, *args, **kwargs)', r"""
|
||||
A unicode string.
|
||||
|
||||
>>> s = np.str_("abc\x00")
|
||||
>>> s
|
||||
'abc'
|
||||
This type strips trailing null codepoints.
|
||||
|
||||
Unlike the builtin :class:`str`, this supports the
|
||||
:ref:`python:bufferobjects`, exposing its contents as UCS4:
|
||||
>>> s = np.str_("abc\x00")
|
||||
>>> s
|
||||
'abc'
|
||||
|
||||
>>> m = memoryview(np.str_("abc"))
|
||||
>>> m.format
|
||||
'3w'
|
||||
>>> m.tobytes()
|
||||
b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
|
||||
""")
|
||||
Unlike the builtin :class:`str`, this supports the
|
||||
:ref:`python:bufferobjects`, exposing its contents as UCS4:
|
||||
|
||||
add_newdoc_for_scalar_type('bytes_', [],
|
||||
r"""
|
||||
A byte string.
|
||||
>>> m = memoryview(np.str_("abc"))
|
||||
>>> m.format
|
||||
'3w'
|
||||
>>> m.tobytes()
|
||||
b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
|
||||
""")
|
||||
|
||||
When used in arrays, this type strips trailing null bytes.
|
||||
""")
|
||||
add_newdoc_for_scalar_type('bytes_', '(value="", /, *args, **kwargs)', r"""
|
||||
A byte string.
|
||||
|
||||
add_newdoc_for_scalar_type('void', [],
|
||||
r"""
|
||||
np.void(length_or_data, /, dtype=None)
|
||||
When used in arrays, this type strips trailing null bytes.
|
||||
""")
|
||||
|
||||
Create a new structured or unstructured void scalar.
|
||||
add_newdoc_for_scalar_type('void', '(length_or_data, /, dtype=None)', r"""
|
||||
np.void(length_or_data, /, dtype=None)
|
||||
|
||||
Parameters
|
||||
----------
|
||||
length_or_data : int, array-like, bytes-like, object
|
||||
One of multiple meanings (see notes). The length or
|
||||
bytes data of an unstructured void. Or alternatively,
|
||||
the data to be stored in the new scalar when `dtype`
|
||||
is provided.
|
||||
This can be an array-like, in which case an array may
|
||||
be returned.
|
||||
dtype : dtype, optional
|
||||
If provided the dtype of the new scalar. This dtype must
|
||||
be "void" dtype (i.e. a structured or unstructured void,
|
||||
see also :ref:`defining-structured-types`).
|
||||
Create a new structured or unstructured void scalar.
|
||||
|
||||
.. versionadded:: 1.24
|
||||
Parameters
|
||||
----------
|
||||
length_or_data : int, array-like, bytes-like, object
|
||||
One of multiple meanings (see notes). The length or
|
||||
bytes data of an unstructured void. Or alternatively,
|
||||
the data to be stored in the new scalar when `dtype`
|
||||
is provided.
|
||||
This can be an array-like, in which case an array may
|
||||
be returned.
|
||||
dtype : dtype, optional
|
||||
If provided the dtype of the new scalar. This dtype must
|
||||
be "void" dtype (i.e. a structured or unstructured void,
|
||||
see also :ref:`defining-structured-types`).
|
||||
|
||||
Notes
|
||||
-----
|
||||
For historical reasons and because void scalars can represent both
|
||||
arbitrary byte data and structured dtypes, the void constructor
|
||||
has three calling conventions:
|
||||
.. versionadded:: 1.24
|
||||
|
||||
1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
|
||||
``\0`` bytes. The 5 can be a Python or NumPy integer.
|
||||
2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
|
||||
The dtype itemsize will match the byte string length, here ``"V10"``.
|
||||
3. When a ``dtype=`` is passed the call is roughly the same as an
|
||||
array creation. However, a void scalar rather than array is returned.
|
||||
Notes
|
||||
-----
|
||||
For historical reasons and because void scalars can represent both
|
||||
arbitrary byte data and structured dtypes, the void constructor
|
||||
has three calling conventions:
|
||||
|
||||
Please see the examples which show all three different conventions.
|
||||
1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
|
||||
``\0`` bytes. The 5 can be a Python or NumPy integer.
|
||||
2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
|
||||
The dtype itemsize will match the byte string length, here ``"V10"``.
|
||||
3. When a ``dtype=`` is passed the call is roughly the same as an
|
||||
array creation. However, a void scalar rather than array is returned.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> np.void(5)
|
||||
np.void(b'\x00\x00\x00\x00\x00')
|
||||
>>> np.void(b'abcd')
|
||||
np.void(b'\x61\x62\x63\x64')
|
||||
>>> np.void((3.2, b'eggs'), dtype="d,S5")
|
||||
np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
|
||||
>>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
|
||||
np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])
|
||||
Please see the examples which show all three different conventions.
|
||||
|
||||
""")
|
||||
Examples
|
||||
--------
|
||||
>>> np.void(5)
|
||||
np.void(b'\x00\x00\x00\x00\x00')
|
||||
>>> np.void(b'abcd')
|
||||
np.void(b'\x61\x62\x63\x64')
|
||||
>>> np.void((3.2, b'eggs'), dtype="d,S5")
|
||||
np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
|
||||
>>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
|
||||
np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('datetime64', [],
|
||||
"""
|
||||
If created from a 64-bit integer, it represents an offset from
|
||||
``1970-01-01T00:00:00``.
|
||||
If created from string, the string can be in ISO 8601 date
|
||||
or datetime format.
|
||||
add_newdoc_for_scalar_type('datetime64', '(value=None, /, *args)', """
|
||||
If created from a 64-bit integer, it represents an offset from ``1970-01-01T00:00:00``.
|
||||
If created from string, the string can be in ISO 8601 date or datetime format.
|
||||
|
||||
When parsing a string to create a datetime object, if the string contains
|
||||
a trailing timezone (A 'Z' or a timezone offset), the timezone will be
|
||||
dropped and a User Warning is given.
|
||||
When parsing a string to create a datetime object, if the string contains
|
||||
a trailing timezone (A 'Z' or a timezone offset), the timezone will be
|
||||
dropped and a User Warning is given.
|
||||
|
||||
Datetime64 objects should be considered to be UTC and therefore have an
|
||||
offset of +0000.
|
||||
Datetime64 objects should be considered to be UTC and therefore have an
|
||||
offset of +0000.
|
||||
|
||||
>>> np.datetime64(10, 'Y')
|
||||
np.datetime64('1980')
|
||||
>>> np.datetime64('1980', 'Y')
|
||||
np.datetime64('1980')
|
||||
>>> np.datetime64(10, 'D')
|
||||
np.datetime64('1970-01-11')
|
||||
>>> np.datetime64(10, 'Y')
|
||||
np.datetime64('1980')
|
||||
>>> np.datetime64('1980', 'Y')
|
||||
np.datetime64('1980')
|
||||
>>> np.datetime64(10, 'D')
|
||||
np.datetime64('1970-01-11')
|
||||
|
||||
See :ref:`arrays.datetime` for more information.
|
||||
""")
|
||||
See :ref:`arrays.datetime` for more information.
|
||||
""")
|
||||
|
||||
add_newdoc_for_scalar_type('timedelta64', [],
|
||||
"""
|
||||
A timedelta stored as a 64-bit integer.
|
||||
add_newdoc_for_scalar_type('timedelta64', '(value=0, /, *args)', """
|
||||
A timedelta stored as a 64-bit integer.
|
||||
|
||||
See :ref:`arrays.datetime` for more information.
|
||||
""")
|
||||
See :ref:`arrays.datetime` for more information.
|
||||
""")
|
||||
|
||||
add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
|
||||
"""
|
||||
is_integer($self, /)
|
||||
--
|
||||
|
||||
integer.is_integer() -> bool
|
||||
|
||||
Return ``True`` if the number is finite with integral value.
|
||||
@@ -339,6 +320,9 @@ add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
|
||||
for float_name in ('half', 'single', 'double', 'longdouble'):
|
||||
add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',
|
||||
f"""
|
||||
as_integer_ratio($self, /)
|
||||
--
|
||||
|
||||
{float_name}.as_integer_ratio() -> (int, int)
|
||||
|
||||
Return a pair of integers, whose ratio is exactly equal to the original
|
||||
@@ -355,6 +339,9 @@ for float_name in ('half', 'single', 'double', 'longdouble'):
|
||||
|
||||
add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',
|
||||
f"""
|
||||
is_integer($self, /)
|
||||
--
|
||||
|
||||
{float_name}.is_integer() -> bool
|
||||
|
||||
Return ``True`` if the floating point number is finite with integral
|
||||
@@ -371,10 +358,14 @@ for float_name in ('half', 'single', 'double', 'longdouble'):
|
||||
"""))
|
||||
|
||||
for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
|
||||
'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'):
|
||||
'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64',
|
||||
'longlong', 'ulonglong'):
|
||||
# Add negative examples for signed cases by checking typecode
|
||||
add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',
|
||||
f"""
|
||||
bit_count($self, /)
|
||||
--
|
||||
|
||||
{int_name}.bit_count() -> int
|
||||
|
||||
Computes the number of 1-bits in the absolute value of the input.
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
from collections.abc import Iterable
|
||||
from typing import Final
|
||||
|
||||
import numpy as np
|
||||
@@ -8,9 +7,10 @@ _system: Final[str] = ...
|
||||
_machine: Final[str] = ...
|
||||
_doc_alias_string: Final[str] = ...
|
||||
_bool_docstring: Final[str] = ...
|
||||
bool_name: str = ...
|
||||
int_name: str = ...
|
||||
float_name: str = ...
|
||||
|
||||
def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ...
|
||||
def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ...
|
||||
def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: ...
|
||||
def _get_platform_and_machine() -> tuple[str, str]: ...
|
||||
|
||||
@@ -4,11 +4,7 @@ Functions in the ``as*array`` family that promote array-likes into arrays.
|
||||
`require` fits this category despite its name not matching this pattern.
|
||||
"""
|
||||
from .multiarray import array, asanyarray
|
||||
from .overrides import (
|
||||
array_function_dispatch,
|
||||
finalize_array_function_like,
|
||||
set_module,
|
||||
)
|
||||
from .overrides import array_function_dispatch, finalize_array_function_like, set_module
|
||||
|
||||
__all__ = ["require"]
|
||||
|
||||
|
||||
@@ -3,6 +3,8 @@ from typing import Any, Literal, TypeAlias, TypeVar, overload
|
||||
|
||||
from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc
|
||||
|
||||
__all__ = ["require"]
|
||||
|
||||
_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])
|
||||
|
||||
_Requirements: TypeAlias = Literal[
|
||||
@@ -18,24 +20,24 @@ _RequirementsWithE: TypeAlias = _Requirements | _E
|
||||
@overload
|
||||
def require(
|
||||
a: _ArrayT,
|
||||
dtype: None = ...,
|
||||
requirements: _Requirements | Iterable[_Requirements] | None = ...,
|
||||
dtype: None = None,
|
||||
requirements: _Requirements | Iterable[_Requirements] | None = None,
|
||||
*,
|
||||
like: _SupportsArrayFunc = ...
|
||||
like: _SupportsArrayFunc | None = None
|
||||
) -> _ArrayT: ...
|
||||
@overload
|
||||
def require(
|
||||
a: object,
|
||||
dtype: DTypeLike = ...,
|
||||
requirements: _E | Iterable[_RequirementsWithE] = ...,
|
||||
dtype: DTypeLike | None = None,
|
||||
requirements: _E | Iterable[_RequirementsWithE] | None = None,
|
||||
*,
|
||||
like: _SupportsArrayFunc = ...
|
||||
like: _SupportsArrayFunc | None = None
|
||||
) -> NDArray[Any]: ...
|
||||
@overload
|
||||
def require(
|
||||
a: object,
|
||||
dtype: DTypeLike = ...,
|
||||
requirements: _Requirements | Iterable[_Requirements] | None = ...,
|
||||
dtype: DTypeLike | None = None,
|
||||
requirements: _Requirements | Iterable[_Requirements] | None = None,
|
||||
*,
|
||||
like: _SupportsArrayFunc = ...
|
||||
like: _SupportsArrayFunc | None = None
|
||||
) -> NDArray[Any]: ...
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
from typing import Final, TypeAlias, TypedDict, overload, type_check_only
|
||||
from typing import Literal as L
|
||||
|
||||
from typing import Final, Literal as L, TypeAlias, TypedDict, overload, type_check_only
|
||||
from typing_extensions import ReadOnly, TypeVar
|
||||
|
||||
import numpy as np
|
||||
|
||||
@@ -3,7 +3,6 @@ from typing import Any, Final, TypeVar, overload
|
||||
|
||||
import numpy as np
|
||||
from numpy import _CastingKind
|
||||
from numpy._utils import set_module as set_module
|
||||
|
||||
###
|
||||
|
||||
|
||||
@@ -365,46 +365,6 @@ class _ctypes:
|
||||
"""
|
||||
return self.data_as(ctypes.c_void_p)
|
||||
|
||||
# Numpy 1.21.0, 2021-05-18
|
||||
|
||||
def get_data(self):
|
||||
"""Deprecated getter for the `_ctypes.data` property.
|
||||
|
||||
.. deprecated:: 1.21
|
||||
"""
|
||||
warnings.warn('"get_data" is deprecated. Use "data" instead',
|
||||
DeprecationWarning, stacklevel=2)
|
||||
return self.data
|
||||
|
||||
def get_shape(self):
|
||||
"""Deprecated getter for the `_ctypes.shape` property.
|
||||
|
||||
.. deprecated:: 1.21
|
||||
"""
|
||||
warnings.warn('"get_shape" is deprecated. Use "shape" instead',
|
||||
DeprecationWarning, stacklevel=2)
|
||||
return self.shape
|
||||
|
||||
def get_strides(self):
|
||||
"""Deprecated getter for the `_ctypes.strides` property.
|
||||
|
||||
.. deprecated:: 1.21
|
||||
"""
|
||||
warnings.warn('"get_strides" is deprecated. Use "strides" instead',
|
||||
DeprecationWarning, stacklevel=2)
|
||||
return self.strides
|
||||
|
||||
def get_as_parameter(self):
|
||||
"""Deprecated getter for the `_ctypes._as_parameter_` property.
|
||||
|
||||
.. deprecated:: 1.21
|
||||
"""
|
||||
warnings.warn(
|
||||
'"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
|
||||
DeprecationWarning, stacklevel=2,
|
||||
)
|
||||
return self._as_parameter_
|
||||
|
||||
|
||||
def _newnames(datatype, order):
|
||||
"""
|
||||
@@ -895,6 +855,8 @@ def _ufunc_doc_signature_formatter(ufunc):
|
||||
Builds a signature string which resembles PEP 457
|
||||
|
||||
This is used to construct the first line of the docstring
|
||||
|
||||
Keep in sync with `_ufunc_inspect_signature_builder`.
|
||||
"""
|
||||
|
||||
# input arguments are simple
|
||||
@@ -933,6 +895,54 @@ def _ufunc_doc_signature_formatter(ufunc):
|
||||
return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})'
|
||||
|
||||
|
||||
def _ufunc_inspect_signature_builder(ufunc):
|
||||
"""
|
||||
Builds a ``__signature__`` string.
|
||||
|
||||
Should be kept in sync with `_ufunc_doc_signature_formatter`.
|
||||
"""
|
||||
|
||||
from inspect import Parameter, Signature
|
||||
|
||||
params = []
|
||||
|
||||
# positional-only input parameters
|
||||
if ufunc.nin == 1:
|
||||
params.append(Parameter("x", Parameter.POSITIONAL_ONLY))
|
||||
else:
|
||||
params.extend(
|
||||
Parameter(f"x{i}", Parameter.POSITIONAL_ONLY)
|
||||
for i in range(1, ufunc.nin + 1)
|
||||
)
|
||||
|
||||
# for the sake of simplicity, we only consider a single output parameter
|
||||
if ufunc.nout == 1:
|
||||
out_default = None
|
||||
else:
|
||||
out_default = (None,) * ufunc.nout
|
||||
params.append(
|
||||
Parameter("out", Parameter.POSITIONAL_OR_KEYWORD, default=out_default),
|
||||
)
|
||||
|
||||
if ufunc.signature is None:
|
||||
params.append(Parameter("where", Parameter.KEYWORD_ONLY, default=True))
|
||||
else:
|
||||
# NOTE: not all gufuncs support the `axis` parameters
|
||||
params.append(Parameter("axes", Parameter.KEYWORD_ONLY, default=_NoValue))
|
||||
params.append(Parameter("axis", Parameter.KEYWORD_ONLY, default=_NoValue))
|
||||
params.append(Parameter("keepdims", Parameter.KEYWORD_ONLY, default=False))
|
||||
|
||||
params.extend((
|
||||
Parameter("casting", Parameter.KEYWORD_ONLY, default='same_kind'),
|
||||
Parameter("order", Parameter.KEYWORD_ONLY, default='K'),
|
||||
Parameter("dtype", Parameter.KEYWORD_ONLY, default=None),
|
||||
Parameter("subok", Parameter.KEYWORD_ONLY, default=True),
|
||||
Parameter("signature", Parameter.KEYWORD_ONLY, default=None),
|
||||
))
|
||||
|
||||
return Signature(params)
|
||||
|
||||
|
||||
def npy_ctypes_check(cls):
|
||||
# determine if a class comes from ctypes, in order to work around
|
||||
# a bug in the buffer protocol for those objects, bpo-10746
|
||||
|
||||
@@ -2,8 +2,7 @@ import ctypes as ct
|
||||
import re
|
||||
from collections.abc import Callable, Iterable
|
||||
from typing import Any, Final, Generic, Self, overload
|
||||
|
||||
from typing_extensions import TypeVar, deprecated
|
||||
from typing_extensions import TypeVar
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
@@ -48,16 +47,6 @@ class _ctypes(Generic[_PT_co]):
|
||||
def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...
|
||||
def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...
|
||||
|
||||
#
|
||||
@deprecated('"get_data" is deprecated. Use "data" instead')
|
||||
def get_data(self, /) -> _PT_co: ...
|
||||
@deprecated('"get_shape" is deprecated. Use "shape" instead')
|
||||
def get_shape(self, /) -> ct.Array[c_intp]: ...
|
||||
@deprecated('"get_strides" is deprecated. Use "strides" instead')
|
||||
def get_strides(self, /) -> ct.Array[c_intp]: ...
|
||||
@deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead')
|
||||
def get_as_parameter(self, /) -> ct.c_void_p: ...
|
||||
|
||||
class dummy_ctype(Generic[_T_co]):
|
||||
_cls: type[_T_co]
|
||||
|
||||
|
||||
@@ -1,355 +0,0 @@
|
||||
"""
|
||||
Machine arithmetic - determine the parameters of the
|
||||
floating-point arithmetic system
|
||||
|
||||
Author: Pearu Peterson, September 2003
|
||||
|
||||
"""
|
||||
__all__ = ['MachAr']
|
||||
|
||||
from ._ufunc_config import errstate
|
||||
from .fromnumeric import any
|
||||
|
||||
# Need to speed this up...especially for longdouble
|
||||
|
||||
# Deprecated 2021-10-20, NumPy 1.22
|
||||
class MachAr:
|
||||
"""
|
||||
Diagnosing machine parameters.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
ibeta : int
|
||||
Radix in which numbers are represented.
|
||||
it : int
|
||||
Number of base-`ibeta` digits in the floating point mantissa M.
|
||||
machep : int
|
||||
Exponent of the smallest (most negative) power of `ibeta` that,
|
||||
added to 1.0, gives something different from 1.0
|
||||
eps : float
|
||||
Floating-point number ``beta**machep`` (floating point precision)
|
||||
negep : int
|
||||
Exponent of the smallest power of `ibeta` that, subtracted
|
||||
from 1.0, gives something different from 1.0.
|
||||
epsneg : float
|
||||
Floating-point number ``beta**negep``.
|
||||
iexp : int
|
||||
Number of bits in the exponent (including its sign and bias).
|
||||
minexp : int
|
||||
Smallest (most negative) power of `ibeta` consistent with there
|
||||
being no leading zeros in the mantissa.
|
||||
xmin : float
|
||||
Floating-point number ``beta**minexp`` (the smallest [in
|
||||
magnitude] positive floating point number with full precision).
|
||||
maxexp : int
|
||||
Smallest (positive) power of `ibeta` that causes overflow.
|
||||
xmax : float
|
||||
``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
|
||||
usable floating value).
|
||||
irnd : int
|
||||
In ``range(6)``, information on what kind of rounding is done
|
||||
in addition, and on how underflow is handled.
|
||||
ngrd : int
|
||||
Number of 'guard digits' used when truncating the product
|
||||
of two mantissas to fit the representation.
|
||||
epsilon : float
|
||||
Same as `eps`.
|
||||
tiny : float
|
||||
An alias for `smallest_normal`, kept for backwards compatibility.
|
||||
huge : float
|
||||
Same as `xmax`.
|
||||
precision : float
|
||||
``- int(-log10(eps))``
|
||||
resolution : float
|
||||
``- 10**(-precision)``
|
||||
smallest_normal : float
|
||||
The smallest positive floating point number with 1 as leading bit in
|
||||
the mantissa following IEEE-754. Same as `xmin`.
|
||||
smallest_subnormal : float
|
||||
The smallest positive floating point number with 0 as leading bit in
|
||||
the mantissa following IEEE-754.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
float_conv : function, optional
|
||||
Function that converts an integer or integer array to a float
|
||||
or float array. Default is `float`.
|
||||
int_conv : function, optional
|
||||
Function that converts a float or float array to an integer or
|
||||
integer array. Default is `int`.
|
||||
float_to_float : function, optional
|
||||
Function that converts a float array to float. Default is `float`.
|
||||
Note that this does not seem to do anything useful in the current
|
||||
implementation.
|
||||
float_to_str : function, optional
|
||||
Function that converts a single float to a string. Default is
|
||||
``lambda v:'%24.16e' %v``.
|
||||
title : str, optional
|
||||
Title that is printed in the string representation of `MachAr`.
|
||||
|
||||
See Also
|
||||
--------
|
||||
finfo : Machine limits for floating point types.
|
||||
iinfo : Machine limits for integer types.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Press, Teukolsky, Vetterling and Flannery,
|
||||
"Numerical Recipes in C++," 2nd ed,
|
||||
Cambridge University Press, 2002, p. 31.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, float_conv=float, int_conv=int,
|
||||
float_to_float=float,
|
||||
float_to_str=lambda v: f'{v:24.16e}',
|
||||
title='Python floating point number'):
|
||||
"""
|
||||
|
||||
float_conv - convert integer to float (array)
|
||||
int_conv - convert float (array) to integer
|
||||
float_to_float - convert float array to float
|
||||
float_to_str - convert array float to str
|
||||
title - description of used floating point numbers
|
||||
|
||||
"""
|
||||
# We ignore all errors here because we are purposely triggering
|
||||
# underflow to detect the properties of the running arch.
|
||||
with errstate(under='ignore'):
|
||||
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
|
||||
|
||||
    def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
        """Iteratively probe the floating point type and store its parameters.

        Implements the MACHAR algorithm (see the class-level reference to
        Numerical Recipes): using only arithmetic on values derived from
        ``float_conv(1)``, it discovers the radix, mantissa length, epsilon,
        and underflow/overflow limits, and stores them on ``self`` as
        ``ibeta``, ``it``, ``machep``/``eps``, ``negep``/``epsneg``,
        ``iexp``/``minexp``/``xmin``, ``maxexp``/``xmax``, ``irnd``, ``ngrd``,
        plus derived aliases (``epsilon``, ``tiny``, ``huge``, ``precision``,
        ``resolution``, ``smallest_normal``, ``smallest_subnormal``).

        NOTE(review): ``any`` used below is not defined locally; it must
        accept both scalars and arrays (presumably an ``any`` imported at
        module level -- confirm against the full file).
        """
        # Hard iteration cap: every probe loop below converges long before
        # this, so exhausting it means the arithmetic type is broken.
        max_iterN = 10000
        msg = "Did not converge after %d tries with %s"
        one = float_conv(1)
        two = one + one
        zero = one - one

        # Do we really need to do this? Aren't they 2 and 2.0?
        # Determine ibeta and beta
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        b = one
        for _ in range(max_iterN):
            b = b + b
            temp = a + b
            itemp = int_conv(temp - a)
            if any(itemp != 0):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        ibeta = itemp  # the radix (floating point base)
        beta = float_conv(ibeta)

        # Determine it and irnd
        it = -1  # ends up as the number of base-`ibeta` mantissa digits
        b = one
        for _ in range(max_iterN):
            it = it + 1
            b = b * beta
            temp = b + one
            temp1 = temp - b
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))

        betah = beta / two
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        # irnd classifies the rounding behaviour observed at the mantissa edge
        temp = a + betah
        irnd = 0
        if any(temp - a != zero):
            irnd = 1
        tempa = a + beta
        temp = tempa + betah
        if irnd == 0 and any(temp - tempa != zero):
            irnd = 2

        # Determine negep and epsneg
        negep = it + 3
        betain = one / beta
        a = one
        for i in range(negep):
            a = a * betain
        b = a
        for _ in range(max_iterN):
            temp = one - a
            if any(temp - one != zero):
                break
            a = a * beta
            negep = negep - 1
            # Prevent infinite loop on PPC with gcc 4.0:
            if negep < 0:
                raise RuntimeError("could not determine machine tolerance "
                                   "for 'negep', locals() -> %s" % (locals()))
        else:
            raise RuntimeError(msg % (_, one.dtype))
        negep = -negep
        epsneg = a

        # Determine machep and eps
        machep = - it - 3
        a = b

        for _ in range(max_iterN):
            temp = one + a
            if any(temp - one != zero):
                break
            a = a * beta
            machep = machep + 1
        else:
            raise RuntimeError(msg % (_, one.dtype))
        eps = a

        # Determine ngrd
        ngrd = 0
        temp = one + eps
        if irnd == 0 and any(temp * one - one != zero):
            ngrd = 1

        # Determine iexp
        i = 0
        k = 1
        z = betain
        t = one + eps
        nxres = 0
        for _ in range(max_iterN):
            y = z
            z = y * y
            a = z * one  # Check here for underflow
            temp = z * t
            if any(a + a == zero) or any(abs(z) >= y):
                break
            temp1 = temp * betain
            if any(temp1 * beta == z):
                break
            i = i + 1
            k = k + k
        else:
            raise RuntimeError(msg % (_, one.dtype))
        if ibeta != 10:
            iexp = i + 1
            mx = k + k
        else:
            # Decimal machines need a different exponent-width estimate
            iexp = 2
            iz = ibeta
            while k >= iz:
                iz = iz * ibeta
                iexp = iexp + 1
            mx = iz + iz - 1

        # Determine minexp and xmin
        for _ in range(max_iterN):
            xmin = y
            y = y * betain
            a = y * one
            temp = y * t
            if any((a + a) != zero) and any(abs(y) < xmin):
                k = k + 1
                temp1 = temp * betain
                if any(temp1 * beta == y) and any(temp != y):
                    nxres = 3
                    xmin = y
                    break
            else:
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        minexp = -k

        # Determine maxexp, xmax
        if mx <= k + k - 3 and ibeta != 10:
            mx = mx + mx
            iexp = iexp + 1
        maxexp = mx + minexp
        irnd = irnd + nxres
        if irnd >= 2:
            maxexp = maxexp - 2
        i = maxexp + minexp
        if ibeta == 2 and not i:
            maxexp = maxexp - 1
        if i > 20:
            maxexp = maxexp - 1
        if any(a != y):
            maxexp = maxexp - 2
        xmax = one - epsneg
        if any(xmax * one != xmax):
            xmax = one - beta * epsneg
        xmax = xmax / (xmin * beta * beta * beta)
        i = maxexp + minexp + 3
        for j in range(i):
            if ibeta == 2:
                xmax = xmax + xmax
            else:
                xmax = xmax * beta

        smallest_subnormal = abs(xmin / beta ** (it))

        # Publish the raw MACHAR results as attributes
        self.ibeta = ibeta
        self.it = it
        self.negep = negep
        self.epsneg = float_to_float(epsneg)
        self._str_epsneg = float_to_str(epsneg)
        self.machep = machep
        self.eps = float_to_float(eps)
        self._str_eps = float_to_str(eps)
        self.ngrd = ngrd
        self.iexp = iexp
        self.minexp = minexp
        self.xmin = float_to_float(xmin)
        self._str_xmin = float_to_str(xmin)
        self.maxexp = maxexp
        self.xmax = float_to_float(xmax)
        self._str_xmax = float_to_str(xmax)
        self.irnd = irnd

        self.title = title
        # Commonly used parameters
        self.epsilon = self.eps
        self.tiny = self.xmin
        self.huge = self.xmax
        self.smallest_normal = self.xmin
        self._str_smallest_normal = float_to_str(self.xmin)
        self.smallest_subnormal = float_to_float(smallest_subnormal)
        self._str_smallest_subnormal = float_to_str(smallest_subnormal)

        import math
        self.precision = int(-math.log10(float_to_float(self.eps)))
        ten = two + two + two + two + two
        resolution = ten ** (-self.precision)
        self.resolution = float_to_float(resolution)
        self._str_resolution = float_to_str(resolution)
|
||||
|
||||
def __str__(self):
|
||||
fmt = (
|
||||
'Machine parameters for %(title)s\n'
|
||||
'---------------------------------------------------------------------\n'
|
||||
'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
|
||||
'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
|
||||
'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
|
||||
'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
|
||||
'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
|
||||
'smallest_normal=%(smallest_normal)s '
|
||||
'smallest_subnormal=%(smallest_subnormal)s\n'
|
||||
'---------------------------------------------------------------------\n'
|
||||
)
|
||||
return fmt % self.__dict__
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Running this module directly prints the machine parameters
    # discovered for Python's builtin float.
    print(MachAr())
|
||||
@@ -1,55 +0,0 @@
|
||||
from collections.abc import Iterable
|
||||
from typing import Any, Final, TypeVar, overload
|
||||
|
||||
import numpy as np
|
||||
from numpy import _CastingKind
|
||||
from numpy._utils import set_module as set_module
|
||||
|
||||
###
|
||||
|
||||
_T = TypeVar("_T")
# Any tuple EXCEPT a 1-tuple: either empty or length >= 2 (used so
# `_unpack_tuple` can distinguish the single-element case).
_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])
# Preserves the concrete exception subclass through `_display_as_base`.
_ExceptionT = TypeVar("_ExceptionT", bound=Exception)
|
||||
|
||||
###
|
||||
|
||||
class UFuncTypeError(TypeError):
    """Base TypeError for the ufunc-related errors declared in this stub."""

    # The ufunc involved in the failure.
    ufunc: Final[np.ufunc]
    def __init__(self, /, ufunc: np.ufunc) -> None: ...
|
||||
|
||||
class _UFuncNoLoopError(UFuncTypeError):
    """UFuncTypeError specialization that additionally carries operand dtypes."""

    # dtypes of the operands (any number of them).
    dtypes: tuple[np.dtype, ...]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
|
||||
|
||||
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    """Variant of `_UFuncNoLoopError` whose `dtypes` is exactly a pair."""

    # Narrowed from the base class: always a 2-tuple here.
    dtypes: tuple[np.dtype, np.dtype]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
|
||||
|
||||
class _UFuncCastingError(UFuncTypeError):
    """UFuncTypeError carrying a casting rule and a source/target dtype pair."""

    # The casting rule in effect when the error occurred.
    casting: Final[_CastingKind]
    # Source dtype (trailing underscore avoids the `from` keyword).
    from_: Final[np.dtype]
    # Target dtype of the attempted cast.
    to: Final[np.dtype]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...
|
||||
|
||||
class _UFuncInputCastingError(_UFuncCastingError):
    """`_UFuncCastingError` that records which input operand was involved."""

    # Index of the input operand (the `i` constructor argument).
    in_i: Final[int]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
|
||||
|
||||
class _UFuncOutputCastingError(_UFuncCastingError):
    """`_UFuncCastingError` that records which output operand was involved."""

    # Index of the output operand (the `i` constructor argument).
    out_i: Final[int]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
|
||||
|
||||
class _ArrayMemoryError(MemoryError):
    """MemoryError specialization carrying the shape/dtype of a failed allocation."""

    # Shape of the array whose allocation failed.
    shape: tuple[int, ...]
    # dtype of the array whose allocation failed.
    dtype: np.dtype
    def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...
    # Total allocation size in bytes (derived from shape and dtype itemsize
    # -- presumably; implementation not visible in this stub).
    @property
    def _total_size(self) -> int: ...
    # Render a byte count as a human-readable string.
    @staticmethod
    def _size_to_string(num_bytes: int) -> str: ...
|
||||
|
||||
# Per the overloads: a 1-tuple is unwrapped to its sole element; any other
# tuple length is passed through with its type preserved.
@overload
def _unpack_tuple(tup: tuple[_T]) -> _T: ...
@overload
def _unpack_tuple(tup: _TupleT) -> _TupleT: ...
|
||||
# Class decorator for exception types; the annotation shows it returns a type
# of the same exception class it receives.
def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
|
||||
@@ -9,9 +9,7 @@ import warnings
|
||||
from contextlib import nullcontext
|
||||
|
||||
import numpy as np
|
||||
from numpy._core import multiarray as mu
|
||||
from numpy._core import numerictypes as nt
|
||||
from numpy._core import umath as um
|
||||
from numpy._core import multiarray as mu, numerictypes as nt, umath as um
|
||||
from numpy._core.multiarray import asanyarray
|
||||
from numpy._globals import _NoValue
|
||||
|
||||
@@ -121,7 +119,7 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
|
||||
|
||||
rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
|
||||
if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):
|
||||
warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
|
||||
warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
|
||||
|
||||
# Cast bool, unsigned int, and int to float64 by default
|
||||
if dtype is None:
|
||||
@@ -187,15 +185,14 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
|
||||
# Compute sum of squared deviations from mean
|
||||
# Note that x may not be inexact and that we need it to be an array,
|
||||
# not a scalar.
|
||||
x = asanyarray(arr - arrmean)
|
||||
|
||||
x = um.subtract(arr, arrmean, out=...)
|
||||
if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
|
||||
x = um.multiply(x, x, out=x)
|
||||
x = um.square(x, out=x)
|
||||
# Fast-paths for built-in complex types
|
||||
elif x.dtype in _complex_to_float:
|
||||
xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
|
||||
um.multiply(xv, xv, out=xv)
|
||||
x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
|
||||
elif (_float_dtype := _complex_to_float.get(x.dtype)) is not None:
|
||||
xv = x.view(dtype=(_float_dtype, (2,)))
|
||||
um.square(xv, out=xv)
|
||||
x = um.add(xv[..., 0], xv[..., 1], out=x.real)
|
||||
# Most general case; includes handling object arrays containing imaginary
|
||||
# numbers and complex types with non-native byteorder
|
||||
else:
|
||||
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -8,6 +8,13 @@ AVX2: ModuleType | None = ...
|
||||
AVX512F: ModuleType | None = ...
|
||||
AVX512_SKX: ModuleType | None = ...
|
||||
|
||||
# NOTE: these 2 are only defined on systems with an arm processor
|
||||
ASIMD: ModuleType | None = ...
|
||||
NEON: ModuleType | None = ...
|
||||
|
||||
# NOTE: This is only defined on systems with an riscv64 processor.
|
||||
RVV: ModuleType | None = ...
|
||||
|
||||
baseline: ModuleType | None = ...
|
||||
|
||||
@type_check_only
|
||||
@@ -17,6 +24,9 @@ class SimdTargets(TypedDict):
|
||||
FMA3: ModuleType | None
|
||||
AVX512F: ModuleType | None
|
||||
AVX512_SKX: ModuleType | None
|
||||
ASIMD: ModuleType | None
|
||||
NEON: ModuleType | None
|
||||
RVV: ModuleType | None
|
||||
baseline: ModuleType | None
|
||||
|
||||
targets: SimdTargets = ...
|
||||
|
||||
Binary file not shown.
@@ -36,6 +36,7 @@ _abstract_type_names = {
|
||||
|
||||
for _abstract_type_name in _abstract_type_names:
|
||||
allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name)
|
||||
del _abstract_type_name
|
||||
|
||||
for k, v in typeinfo.items():
|
||||
if k.startswith("NPY_") and v not in c_names_dict:
|
||||
@@ -44,6 +45,8 @@ for k, v in typeinfo.items():
|
||||
concrete_type = v.type
|
||||
allTypes[k] = concrete_type
|
||||
sctypeDict[k] = concrete_type
|
||||
del concrete_type
|
||||
del k, v
|
||||
|
||||
_aliases = {
|
||||
"double": "float64",
|
||||
@@ -60,6 +63,7 @@ _aliases = {
|
||||
for k, v in _aliases.items():
|
||||
sctypeDict[k] = allTypes[v]
|
||||
allTypes[k] = allTypes[v]
|
||||
del k, v
|
||||
|
||||
# extra aliases are added only to `sctypeDict`
|
||||
# to support dtype name access, such as`np.dtype("float")`
|
||||
@@ -76,18 +80,21 @@ _extra_aliases = {
|
||||
|
||||
for k, v in _extra_aliases.items():
|
||||
sctypeDict[k] = allTypes[v]
|
||||
del k, v
|
||||
|
||||
# include extended precision sized aliases
|
||||
for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]:
|
||||
longdouble_type: type = allTypes[full_name]
|
||||
longdouble_type = allTypes[full_name]
|
||||
|
||||
bits: int = dtype(longdouble_type).itemsize * 8
|
||||
base_name: str = "complex" if is_complex else "float"
|
||||
extended_prec_name: str = f"{base_name}{bits}"
|
||||
bits = dtype(longdouble_type).itemsize * 8
|
||||
base_name = "complex" if is_complex else "float"
|
||||
extended_prec_name = f"{base_name}{bits}"
|
||||
if extended_prec_name not in allTypes:
|
||||
sctypeDict[extended_prec_name] = longdouble_type
|
||||
allTypes[extended_prec_name] = longdouble_type
|
||||
|
||||
del is_complex, full_name, longdouble_type, bits, base_name, extended_prec_name
|
||||
|
||||
|
||||
####################
|
||||
# Building `sctypes`
|
||||
@@ -110,10 +117,15 @@ for type_info in typeinfo.values():
|
||||
]:
|
||||
if issubclass(concrete_type, abstract_type):
|
||||
sctypes[type_group].add(concrete_type)
|
||||
del type_group, abstract_type
|
||||
break
|
||||
|
||||
del type_info, concrete_type
|
||||
|
||||
# sort sctype groups by bitsize
|
||||
for sctype_key in sctypes.keys():
|
||||
sctype_list = list(sctypes[sctype_key])
|
||||
sctype_list.sort(key=lambda x: dtype(x).itemsize)
|
||||
sctypes[sctype_key] = sctype_list
|
||||
|
||||
del sctype_key, sctype_list
|
||||
|
||||
@@ -1,19 +1,8 @@
|
||||
from collections.abc import Collection
|
||||
from typing import Final, TypeAlias, TypedDict, type_check_only
|
||||
from typing import Literal as L
|
||||
from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only
|
||||
|
||||
import numpy as np
|
||||
|
||||
__all__ = (
|
||||
"_abstract_type_names",
|
||||
"_aliases",
|
||||
"_extra_aliases",
|
||||
"allTypes",
|
||||
"c_names_dict",
|
||||
"sctypeDict",
|
||||
"sctypes",
|
||||
)
|
||||
|
||||
sctypeDict: Final[dict[str, type[np.generic]]]
|
||||
allTypes: Final[dict[str, type[np.generic]]]
|
||||
|
||||
|
||||
@@ -57,6 +57,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
|
||||
seterrcall : Set a callback function for the 'call' mode.
|
||||
geterr, geterrcall, errstate
|
||||
|
||||
|
||||
Notes
|
||||
-----
|
||||
The floating-point exceptions are defined in the IEEE 754 standard [1]_:
|
||||
@@ -68,6 +69,8 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
|
||||
- Invalid operation: result is not an expressible number, typically
|
||||
indicates that a NaN was produced.
|
||||
|
||||
**Concurrency note:** see :ref:`fp_error_handling`
|
||||
|
||||
.. [1] https://en.wikipedia.org/wiki/IEEE_754
|
||||
|
||||
Examples
|
||||
@@ -127,6 +130,8 @@ def geterr():
|
||||
For complete documentation of the types of floating-point exceptions and
|
||||
treatment options, see `seterr`.
|
||||
|
||||
**Concurrency note:** see :doc:`/reference/routines.err`
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
@@ -172,6 +177,10 @@ def setbufsize(size):
|
||||
bufsize : int
|
||||
Previous size of ufunc buffer in bytes.
|
||||
|
||||
Notes
|
||||
-----
|
||||
**Concurrency note:** see :doc:`/reference/routines.err`
|
||||
|
||||
Examples
|
||||
--------
|
||||
When exiting a `numpy.errstate` context manager the bufsize is restored:
|
||||
@@ -205,6 +214,12 @@ def getbufsize():
|
||||
getbufsize : int
|
||||
Size of ufunc buffer in bytes.
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
**Concurrency note:** see :doc:`/reference/routines.err`
|
||||
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
@@ -256,6 +271,11 @@ def seterrcall(func):
|
||||
--------
|
||||
seterr, geterr, geterrcall
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
**Concurrency note:** see :doc:`/reference/routines.err`
|
||||
|
||||
Examples
|
||||
--------
|
||||
Callback upon error:
|
||||
@@ -331,6 +351,8 @@ def geterrcall():
|
||||
For complete documentation of the types of floating-point exceptions and
|
||||
treatment options, see `seterr`.
|
||||
|
||||
**Concurrency note:** see :ref:`fp_error_handling`
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
@@ -399,6 +421,8 @@ class errstate:
|
||||
For complete documentation of the types of floating-point exceptions and
|
||||
treatment options, see `seterr`.
|
||||
|
||||
**Concurrency note:** see :ref:`fp_error_handling`
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
|
||||
@@ -1,16 +1,7 @@
|
||||
from _typeshed import SupportsWrite
|
||||
from collections.abc import Callable
|
||||
from types import TracebackType
|
||||
from typing import (
|
||||
Any,
|
||||
Final,
|
||||
Literal,
|
||||
TypeAlias,
|
||||
TypedDict,
|
||||
TypeVar,
|
||||
type_check_only,
|
||||
)
|
||||
|
||||
from _typeshed import SupportsWrite
|
||||
from typing import Any, Final, Literal, TypeAlias, TypedDict, TypeVar, type_check_only
|
||||
|
||||
__all__ = [
|
||||
"seterr",
|
||||
@@ -65,11 +56,11 @@ class errstate:
|
||||
) -> None: ...
|
||||
|
||||
def seterr(
|
||||
all: _ErrKind | None = ...,
|
||||
divide: _ErrKind | None = ...,
|
||||
over: _ErrKind | None = ...,
|
||||
under: _ErrKind | None = ...,
|
||||
invalid: _ErrKind | None = ...,
|
||||
all: _ErrKind | None = None,
|
||||
divide: _ErrKind | None = None,
|
||||
over: _ErrKind | None = None,
|
||||
under: _ErrKind | None = None,
|
||||
invalid: _ErrKind | None = None,
|
||||
) -> _ErrDict: ...
|
||||
def geterr() -> _ErrDict: ...
|
||||
def setbufsize(size: int) -> int: ...
|
||||
|
||||
Binary file not shown.
@@ -248,11 +248,15 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
|
||||
--------
|
||||
get_printoptions, printoptions, array2string
|
||||
|
||||
|
||||
Notes
|
||||
-----
|
||||
`formatter` is always reset with a call to `set_printoptions`.
|
||||
|
||||
Use `printoptions` as a context manager to set the values temporarily.
|
||||
* ``formatter`` is always reset with a call to `set_printoptions`.
|
||||
* Use `printoptions` as a context manager to set the values temporarily.
|
||||
* These print options apply only to NumPy ndarrays, not to scalars.
|
||||
|
||||
**Concurrency note:** see :ref:`text_formatting_options`
|
||||
|
||||
Examples
|
||||
--------
|
||||
@@ -352,6 +356,12 @@ def get_printoptions():
|
||||
|
||||
For a full description of these options, see `set_printoptions`.
|
||||
|
||||
Notes
|
||||
-----
|
||||
These print options apply only to NumPy ndarrays, not to scalars.
|
||||
|
||||
**Concurrency note:** see :ref:`text_formatting_options`
|
||||
|
||||
See Also
|
||||
--------
|
||||
set_printoptions, printoptions
|
||||
@@ -410,6 +420,12 @@ def printoptions(*args, **kwargs):
|
||||
--------
|
||||
set_printoptions, get_printoptions
|
||||
|
||||
Notes
|
||||
-----
|
||||
These print options apply only to NumPy ndarrays, not to scalars.
|
||||
|
||||
**Concurrency note:** see :ref:`text_formatting_options`
|
||||
|
||||
"""
|
||||
token = _set_printoptions(*args, **kwargs)
|
||||
|
||||
@@ -610,18 +626,18 @@ def _array2string(a, options, separator=' ', prefix=""):
|
||||
def _array2string_dispatcher(
|
||||
a, max_line_width=None, precision=None,
|
||||
suppress_small=None, separator=None, prefix=None,
|
||||
style=None, formatter=None, threshold=None,
|
||||
*, formatter=None, threshold=None,
|
||||
edgeitems=None, sign=None, floatmode=None, suffix=None,
|
||||
*, legacy=None):
|
||||
legacy=None):
|
||||
return (a,)
|
||||
|
||||
|
||||
@array_function_dispatch(_array2string_dispatcher, module='numpy')
|
||||
def array2string(a, max_line_width=None, precision=None,
|
||||
suppress_small=None, separator=' ', prefix="",
|
||||
style=np._NoValue, formatter=None, threshold=None,
|
||||
*, formatter=None, threshold=None,
|
||||
edgeitems=None, sign=None, floatmode=None, suffix="",
|
||||
*, legacy=None):
|
||||
legacy=None):
|
||||
"""
|
||||
Return a string representation of an array.
|
||||
|
||||
@@ -654,10 +670,6 @@ def array2string(a, max_line_width=None, precision=None,
|
||||
wrapping is forced at the column ``max_line_width - len(suffix)``.
|
||||
It should be noted that the content of prefix and suffix strings are
|
||||
not included in the output.
|
||||
style : _NoValue, optional
|
||||
Has no effect, do not use.
|
||||
|
||||
.. deprecated:: 1.14.0
|
||||
formatter : dict of callables, optional
|
||||
If not None, the keys should indicate the type(s) that the respective
|
||||
formatting function applies to. Callables should return a string.
|
||||
@@ -777,16 +789,8 @@ def array2string(a, max_line_width=None, precision=None,
|
||||
options.update(overrides)
|
||||
|
||||
if options['legacy'] <= 113:
|
||||
if style is np._NoValue:
|
||||
style = repr
|
||||
|
||||
if a.shape == () and a.dtype.names is None:
|
||||
return style(a.item())
|
||||
elif style is not np._NoValue:
|
||||
# Deprecation 11-9-2017 v1.14
|
||||
warnings.warn("'style' argument is deprecated and no longer functional"
|
||||
" except in 1.13 'legacy' mode",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
return repr(a.item())
|
||||
|
||||
if options['legacy'] > 113:
|
||||
options['linewidth'] -= len(suffix)
|
||||
|
||||
@@ -10,14 +10,10 @@ from typing import (
|
||||
SupportsIndex,
|
||||
TypeAlias,
|
||||
TypedDict,
|
||||
overload,
|
||||
type_check_only,
|
||||
)
|
||||
|
||||
from typing_extensions import deprecated
|
||||
|
||||
import numpy as np
|
||||
from numpy._globals import _NoValueType
|
||||
from numpy._typing import NDArray, _CharLike_co, _FloatLike_co
|
||||
|
||||
__all__ = [
|
||||
@@ -78,14 +74,14 @@ class _FormatOptions(TypedDict):
|
||||
__docformat__: Final = "restructuredtext" # undocumented
|
||||
|
||||
def set_printoptions(
|
||||
precision: SupportsIndex | None = ...,
|
||||
threshold: int | None = ...,
|
||||
edgeitems: int | None = ...,
|
||||
linewidth: int | None = ...,
|
||||
suppress: bool | None = ...,
|
||||
nanstr: str | None = ...,
|
||||
infstr: str | None = ...,
|
||||
formatter: _FormatDict | None = ...,
|
||||
precision: SupportsIndex | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
linewidth: int | None = None,
|
||||
suppress: bool | None = None,
|
||||
nanstr: str | None = None,
|
||||
infstr: str | None = None,
|
||||
formatter: _FormatDict | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
*,
|
||||
@@ -95,7 +91,6 @@ def set_printoptions(
|
||||
def get_printoptions() -> _FormatOptions: ...
|
||||
|
||||
# public numpy export
|
||||
@overload # no style
|
||||
def array2string(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None = None,
|
||||
@@ -103,123 +98,48 @@ def array2string(
|
||||
suppress_small: bool | None = None,
|
||||
separator: str = " ",
|
||||
prefix: str = "",
|
||||
style: _NoValueType = ...,
|
||||
*,
|
||||
formatter: _FormatDict | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
suffix: str = "",
|
||||
*,
|
||||
legacy: _Legacy | None = None,
|
||||
) -> str: ...
|
||||
@overload # style=<given> (positional), legacy="1.13"
|
||||
def array2string(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None,
|
||||
precision: SupportsIndex | None,
|
||||
suppress_small: bool | None,
|
||||
separator: str,
|
||||
prefix: str,
|
||||
style: _ReprFunc,
|
||||
formatter: _FormatDict | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
suffix: str = "",
|
||||
*,
|
||||
legacy: Literal["1.13"],
|
||||
) -> str: ...
|
||||
@overload # style=<given> (keyword), legacy="1.13"
|
||||
def array2string(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None = None,
|
||||
precision: SupportsIndex | None = None,
|
||||
suppress_small: bool | None = None,
|
||||
separator: str = " ",
|
||||
prefix: str = "",
|
||||
*,
|
||||
style: _ReprFunc,
|
||||
formatter: _FormatDict | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
suffix: str = "",
|
||||
legacy: Literal["1.13"],
|
||||
) -> str: ...
|
||||
@overload # style=<given> (positional), legacy!="1.13"
|
||||
@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
|
||||
def array2string(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None,
|
||||
precision: SupportsIndex | None,
|
||||
suppress_small: bool | None,
|
||||
separator: str,
|
||||
prefix: str,
|
||||
style: _ReprFunc,
|
||||
formatter: _FormatDict | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
suffix: str = "",
|
||||
*,
|
||||
legacy: _LegacyNoStyle | None = None,
|
||||
) -> str: ...
|
||||
@overload # style=<given> (keyword), legacy="1.13"
|
||||
@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
|
||||
def array2string(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None = None,
|
||||
precision: SupportsIndex | None = None,
|
||||
suppress_small: bool | None = None,
|
||||
separator: str = " ",
|
||||
prefix: str = "",
|
||||
*,
|
||||
style: _ReprFunc,
|
||||
formatter: _FormatDict | None = None,
|
||||
threshold: int | None = None,
|
||||
edgeitems: int | None = None,
|
||||
sign: _Sign | None = None,
|
||||
floatmode: _FloatMode | None = None,
|
||||
suffix: str = "",
|
||||
legacy: _LegacyNoStyle | None = None,
|
||||
) -> str: ...
|
||||
|
||||
def format_float_scientific(
|
||||
x: _FloatLike_co,
|
||||
precision: int | None = ...,
|
||||
unique: bool = ...,
|
||||
precision: int | None = None,
|
||||
unique: bool = True,
|
||||
trim: _Trim = "k",
|
||||
sign: bool = ...,
|
||||
pad_left: int | None = ...,
|
||||
exp_digits: int | None = ...,
|
||||
min_digits: int | None = ...,
|
||||
sign: bool = False,
|
||||
pad_left: int | None = None,
|
||||
exp_digits: int | None = None,
|
||||
min_digits: int | None = None,
|
||||
) -> str: ...
|
||||
def format_float_positional(
|
||||
x: _FloatLike_co,
|
||||
precision: int | None = ...,
|
||||
unique: bool = ...,
|
||||
fractional: bool = ...,
|
||||
precision: int | None = None,
|
||||
unique: bool = True,
|
||||
fractional: bool = True,
|
||||
trim: _Trim = "k",
|
||||
sign: bool = ...,
|
||||
pad_left: int | None = ...,
|
||||
pad_right: int | None = ...,
|
||||
min_digits: int | None = ...,
|
||||
sign: bool = False,
|
||||
pad_left: int | None = None,
|
||||
pad_right: int | None = None,
|
||||
min_digits: int | None = None,
|
||||
) -> str: ...
|
||||
def array_repr(
|
||||
arr: NDArray[Any],
|
||||
max_line_width: int | None = ...,
|
||||
precision: SupportsIndex | None = ...,
|
||||
suppress_small: bool | None = ...,
|
||||
max_line_width: int | None = None,
|
||||
precision: SupportsIndex | None = None,
|
||||
suppress_small: bool | None = None,
|
||||
) -> str: ...
|
||||
def array_str(
|
||||
a: NDArray[Any],
|
||||
max_line_width: int | None = ...,
|
||||
precision: SupportsIndex | None = ...,
|
||||
suppress_small: bool | None = ...,
|
||||
max_line_width: int | None = None,
|
||||
precision: SupportsIndex | None = None,
|
||||
suppress_small: bool | None = None,
|
||||
) -> str: ...
|
||||
def printoptions(
|
||||
precision: SupportsIndex | None = ...,
|
||||
|
||||
@@ -22,31 +22,19 @@ from numpy._core import overrides
|
||||
from numpy._core.multiarray import compare_chararrays
|
||||
from numpy._core.strings import (
|
||||
_join as join,
|
||||
)
|
||||
from numpy._core.strings import (
|
||||
_rsplit as rsplit,
|
||||
)
|
||||
from numpy._core.strings import (
|
||||
_split as split,
|
||||
)
|
||||
from numpy._core.strings import (
|
||||
_splitlines as splitlines,
|
||||
)
|
||||
from numpy._utils import set_module
|
||||
from numpy.strings import *
|
||||
from numpy.strings import (
|
||||
multiply as strings_multiply,
|
||||
)
|
||||
from numpy.strings import (
|
||||
partition as strings_partition,
|
||||
)
|
||||
from numpy.strings import (
|
||||
rpartition as strings_rpartition,
|
||||
)
|
||||
|
||||
from .numeric import array as narray
|
||||
from .numeric import asarray as asnarray
|
||||
from .numeric import ndarray
|
||||
from .numeric import array as narray, asarray as asnarray, ndarray
|
||||
from .numerictypes import bytes_, character, str_
|
||||
|
||||
__all__ = [
|
||||
@@ -508,7 +496,6 @@ class chararray(ndarray):
|
||||
title
|
||||
tofile
|
||||
tolist
|
||||
tostring
|
||||
translate
|
||||
transpose
|
||||
upper
|
||||
@@ -731,7 +718,7 @@ class chararray(ndarray):
|
||||
def __rmod__(self, other):
|
||||
return NotImplemented
|
||||
|
||||
def argsort(self, axis=-1, kind=None, order=None):
|
||||
def argsort(self, axis=-1, kind=None, order=None, *, stable=None):
|
||||
"""
|
||||
Return the indices that sort the array lexicographically.
|
||||
|
||||
@@ -749,7 +736,7 @@ class chararray(ndarray):
|
||||
dtype='|S5')
|
||||
|
||||
"""
|
||||
return self.__array__().argsort(axis, kind, order)
|
||||
return self.__array__().argsort(axis, kind, order, stable=stable)
|
||||
argsort.__doc__ = ndarray.argsort.__doc__
|
||||
|
||||
def capitalize(self):
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -2,12 +2,14 @@
|
||||
Implementation of optimized einsum.
|
||||
|
||||
"""
|
||||
import functools
|
||||
import itertools
|
||||
import operator
|
||||
|
||||
from numpy._core.multiarray import c_einsum
|
||||
from numpy._core.numeric import asanyarray, tensordot
|
||||
from numpy._core.multiarray import c_einsum, matmul
|
||||
from numpy._core.numeric import asanyarray, reshape
|
||||
from numpy._core.overrides import array_function_dispatch
|
||||
from numpy._core.umath import multiply
|
||||
|
||||
__all__ = ['einsum', 'einsum_path']
|
||||
|
||||
@@ -440,116 +442,6 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
|
||||
return path
|
||||
|
||||
|
||||
def _can_dot(inputs, result, idx_removed):
|
||||
"""
|
||||
Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
inputs : list of str
|
||||
Specifies the subscripts for summation.
|
||||
result : str
|
||||
Resulting summation.
|
||||
idx_removed : set
|
||||
Indices that are removed in the summation
|
||||
|
||||
|
||||
Returns
|
||||
-------
|
||||
type : bool
|
||||
Returns true if BLAS should and can be used, else False
|
||||
|
||||
Notes
|
||||
-----
|
||||
If the operations is BLAS level 1 or 2 and is not already aligned
|
||||
we default back to einsum as the memory movement to copy is more
|
||||
costly than the operation itself.
|
||||
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
# Standard GEMM operation
|
||||
>>> _can_dot(['ij', 'jk'], 'ik', set('j'))
|
||||
True
|
||||
|
||||
# Can use the standard BLAS, but requires odd data movement
|
||||
>>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
|
||||
False
|
||||
|
||||
# DDOT where the memory is not aligned
|
||||
>>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
|
||||
False
|
||||
|
||||
"""
|
||||
|
||||
# All `dot` calls remove indices
|
||||
if len(idx_removed) == 0:
|
||||
return False
|
||||
|
||||
# BLAS can only handle two operands
|
||||
if len(inputs) != 2:
|
||||
return False
|
||||
|
||||
input_left, input_right = inputs
|
||||
|
||||
for c in set(input_left + input_right):
|
||||
# can't deal with repeated indices on same input or more than 2 total
|
||||
nl, nr = input_left.count(c), input_right.count(c)
|
||||
if (nl > 1) or (nr > 1) or (nl + nr > 2):
|
||||
return False
|
||||
|
||||
# can't do implicit summation or dimension collapse e.g.
|
||||
# "ab,bc->c" (implicitly sum over 'a')
|
||||
# "ab,ca->ca" (take diagonal of 'a')
|
||||
if nl + nr - 1 == int(c in result):
|
||||
return False
|
||||
|
||||
# Build a few temporaries
|
||||
set_left = set(input_left)
|
||||
set_right = set(input_right)
|
||||
keep_left = set_left - idx_removed
|
||||
keep_right = set_right - idx_removed
|
||||
rs = len(idx_removed)
|
||||
|
||||
# At this point we are a DOT, GEMV, or GEMM operation
|
||||
|
||||
# Handle inner products
|
||||
|
||||
# DDOT with aligned data
|
||||
if input_left == input_right:
|
||||
return True
|
||||
|
||||
# DDOT without aligned data (better to use einsum)
|
||||
if set_left == set_right:
|
||||
return False
|
||||
|
||||
# Handle the 4 possible (aligned) GEMV or GEMM cases
|
||||
|
||||
# GEMM or GEMV no transpose
|
||||
if input_left[-rs:] == input_right[:rs]:
|
||||
return True
|
||||
|
||||
# GEMM or GEMV transpose both
|
||||
if input_left[:rs] == input_right[-rs:]:
|
||||
return True
|
||||
|
||||
# GEMM or GEMV transpose right
|
||||
if input_left[-rs:] == input_right[-rs:]:
|
||||
return True
|
||||
|
||||
# GEMM or GEMV transpose left
|
||||
if input_left[:rs] == input_right[:rs]:
|
||||
return True
|
||||
|
||||
# Einsum is faster than GEMV if we have to copy data
|
||||
if not keep_left or not keep_right:
|
||||
return False
|
||||
|
||||
# We are a matrix-matrix product, but we need to copy data
|
||||
return True
|
||||
|
||||
|
||||
def _parse_einsum_input(operands):
|
||||
"""
|
||||
A reproduction of einsum c side einsum parsing in python.
|
||||
@@ -887,13 +779,14 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
|
||||
|
||||
# Build a few useful list and sets
|
||||
input_list = input_subscripts.split(',')
|
||||
num_inputs = len(input_list)
|
||||
input_sets = [set(x) for x in input_list]
|
||||
output_set = set(output_subscript)
|
||||
indices = set(input_subscripts.replace(',', ''))
|
||||
num_indices = len(indices)
|
||||
|
||||
# Get length of each unique dimension and ensure all dimensions are correct
|
||||
dimension_dict = {}
|
||||
broadcast_indices = [[] for x in range(len(input_list))]
|
||||
for tnum, term in enumerate(input_list):
|
||||
sh = operands[tnum].shape
|
||||
if len(sh) != len(term):
|
||||
@@ -903,10 +796,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
|
||||
for cnum, char in enumerate(term):
|
||||
dim = sh[cnum]
|
||||
|
||||
# Build out broadcast indices
|
||||
if dim == 1:
|
||||
broadcast_indices[tnum].append(char)
|
||||
|
||||
if char in dimension_dict.keys():
|
||||
# For broadcasting cases we always want the largest dim size
|
||||
if dimension_dict[char] == 1:
|
||||
@@ -918,9 +807,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
|
||||
else:
|
||||
dimension_dict[char] = dim
|
||||
|
||||
# Convert broadcast inds to sets
|
||||
broadcast_indices = [set(x) for x in broadcast_indices]
|
||||
|
||||
# Compute size of each input array plus the output array
|
||||
size_list = [_compute_size_by_dict(term, dimension_dict)
|
||||
for term in input_list + [output_subscript]]
|
||||
@@ -931,23 +817,16 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
|
||||
else:
|
||||
memory_arg = memory_limit
|
||||
|
||||
# Compute naive cost
|
||||
# This isn't quite right, need to look into exactly how einsum does this
|
||||
inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
|
||||
naive_cost = _flop_count(
|
||||
indices, inner_product, len(input_list), dimension_dict
|
||||
)
|
||||
|
||||
# Compute the path
|
||||
if explicit_einsum_path:
|
||||
path = path_type[1:]
|
||||
elif (
|
||||
(path_type is False)
|
||||
or (len(input_list) in [1, 2])
|
||||
or (num_inputs in [1, 2])
|
||||
or (indices == output_set)
|
||||
):
|
||||
# Nothing to be optimized, leave it to einsum
|
||||
path = [tuple(range(len(input_list)))]
|
||||
path = [tuple(range(num_inputs))]
|
||||
elif path_type == "greedy":
|
||||
path = _greedy_path(
|
||||
input_sets, output_set, dimension_dict, memory_arg
|
||||
@@ -969,26 +848,18 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
|
||||
contract = _find_contraction(contract_inds, input_sets, output_set)
|
||||
out_inds, input_sets, idx_removed, idx_contract = contract
|
||||
|
||||
cost = _flop_count(
|
||||
idx_contract, idx_removed, len(contract_inds), dimension_dict
|
||||
)
|
||||
cost_list.append(cost)
|
||||
scale_list.append(len(idx_contract))
|
||||
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
|
||||
if not einsum_call_arg:
|
||||
# these are only needed for printing info
|
||||
cost = _flop_count(
|
||||
idx_contract, idx_removed, len(contract_inds), dimension_dict
|
||||
)
|
||||
cost_list.append(cost)
|
||||
scale_list.append(len(idx_contract))
|
||||
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
|
||||
|
||||
bcast = set()
|
||||
tmp_inputs = []
|
||||
for x in contract_inds:
|
||||
tmp_inputs.append(input_list.pop(x))
|
||||
bcast |= broadcast_indices.pop(x)
|
||||
|
||||
new_bcast_inds = bcast - idx_removed
|
||||
|
||||
# If we're broadcasting, nix blas
|
||||
if not len(idx_removed & bcast):
|
||||
do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
|
||||
else:
|
||||
do_blas = False
|
||||
|
||||
# Last contraction
|
||||
if (cnum - len(path)) == -1:
|
||||
@@ -998,16 +869,11 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
|
||||
idx_result = "".join([x[1] for x in sorted(sort_result)])
|
||||
|
||||
input_list.append(idx_result)
|
||||
broadcast_indices.append(new_bcast_inds)
|
||||
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
|
||||
|
||||
contraction = (
|
||||
contract_inds, idx_removed, einsum_str, input_list[:], do_blas
|
||||
)
|
||||
contraction = (contract_inds, einsum_str, input_list[:])
|
||||
contraction_list.append(contraction)
|
||||
|
||||
opt_cost = sum(cost_list) + 1
|
||||
|
||||
if len(input_list) != 1:
|
||||
# Explicit "einsum_path" is usually trusted, but we detect this kind of
|
||||
# mistake in order to prevent from returning an intermediate value.
|
||||
@@ -1022,11 +888,21 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
|
||||
overall_contraction = input_subscripts + "->" + output_subscript
|
||||
header = ("scaling", "current", "remaining")
|
||||
|
||||
# Compute naive cost
|
||||
# This isn't quite right, need to look into exactly how einsum does this
|
||||
inner_product = (
|
||||
sum(len(set(x)) for x in input_subscripts.split(',')) - num_indices
|
||||
) > 0
|
||||
naive_cost = _flop_count(
|
||||
indices, inner_product, num_inputs, dimension_dict
|
||||
)
|
||||
|
||||
opt_cost = sum(cost_list) + 1
|
||||
speedup = naive_cost / opt_cost
|
||||
max_i = max(size_list)
|
||||
|
||||
path_print = f" Complete contraction: {overall_contraction}\n"
|
||||
path_print += f" Naive scaling: {len(indices)}\n"
|
||||
path_print += f" Naive scaling: {num_indices}\n"
|
||||
path_print += " Optimized scaling: %d\n" % max(scale_list)
|
||||
path_print += f" Naive FLOP count: {naive_cost:.3e}\n"
|
||||
path_print += f" Optimized FLOP count: {opt_cost:.3e}\n"
|
||||
@@ -1037,7 +913,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
|
||||
path_print += "-" * 74
|
||||
|
||||
for n, contraction in enumerate(contraction_list):
|
||||
inds, idx_rm, einsum_str, remaining, blas = contraction
|
||||
_, einsum_str, remaining = contraction
|
||||
remaining_str = ",".join(remaining) + "->" + output_subscript
|
||||
path_run = (scale_list[n], einsum_str, remaining_str)
|
||||
path_print += "\n%4d %24s %40s" % path_run
|
||||
@@ -1046,6 +922,317 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
|
||||
return (path, path_print)
|
||||
|
||||
|
||||
def _parse_eq_to_pure_multiplication(a_term, shape_a, b_term, shape_b, out):
|
||||
"""If there are no contracted indices, then we can directly transpose and
|
||||
insert singleton dimensions into ``a`` and ``b`` such that (broadcast)
|
||||
elementwise multiplication performs the einsum.
|
||||
|
||||
No need to cache this as it is within the cached
|
||||
``_parse_eq_to_batch_matmul``.
|
||||
|
||||
"""
|
||||
desired_a = ""
|
||||
desired_b = ""
|
||||
new_shape_a = []
|
||||
new_shape_b = []
|
||||
for ix in out:
|
||||
if ix in a_term:
|
||||
desired_a += ix
|
||||
new_shape_a.append(shape_a[a_term.index(ix)])
|
||||
else:
|
||||
new_shape_a.append(1)
|
||||
if ix in b_term:
|
||||
desired_b += ix
|
||||
new_shape_b.append(shape_b[b_term.index(ix)])
|
||||
else:
|
||||
new_shape_b.append(1)
|
||||
|
||||
if desired_a != a_term:
|
||||
eq_a = f"{a_term}->{desired_a}"
|
||||
else:
|
||||
eq_a = None
|
||||
if desired_b != b_term:
|
||||
eq_b = f"{b_term}->{desired_b}"
|
||||
else:
|
||||
eq_b = None
|
||||
|
||||
return (
|
||||
eq_a,
|
||||
eq_b,
|
||||
new_shape_a,
|
||||
new_shape_b,
|
||||
None, # new_shape_ab, not needed since not fusing
|
||||
None, # perm_ab, not needed as we transpose a and b first
|
||||
True, # pure_multiplication=True
|
||||
)
|
||||
|
||||
|
||||
@functools.lru_cache(2**12)
|
||||
def _parse_eq_to_batch_matmul(eq, shape_a, shape_b):
|
||||
"""Cached parsing of a two term einsum equation into the necessary
|
||||
sequence of arguments for contracttion via batched matrix multiplication.
|
||||
The steps we need to specify are:
|
||||
|
||||
1. Remove repeated and trivial indices from the left and right terms,
|
||||
and transpose them, done as a single einsum.
|
||||
2. Fuse the remaining indices so we have two 3D tensors.
|
||||
3. Perform the batched matrix multiplication.
|
||||
4. Unfuse the output to get the desired final index order.
|
||||
|
||||
"""
|
||||
lhs, out = eq.split("->")
|
||||
a_term, b_term = lhs.split(",")
|
||||
|
||||
if len(a_term) != len(shape_a):
|
||||
raise ValueError(f"Term '{a_term}' does not match shape {shape_a}.")
|
||||
if len(b_term) != len(shape_b):
|
||||
raise ValueError(f"Term '{b_term}' does not match shape {shape_b}.")
|
||||
|
||||
sizes = {}
|
||||
singletons = set()
|
||||
|
||||
# parse left term to unique indices with size > 1
|
||||
left = {}
|
||||
for ix, d in zip(a_term, shape_a):
|
||||
if d == 1:
|
||||
# everything (including broadcasting) works nicely if simply ignore
|
||||
# such dimensions, but we do need to track if they appear in output
|
||||
# and thus should be reintroduced later
|
||||
singletons.add(ix)
|
||||
continue
|
||||
if sizes.setdefault(ix, d) != d:
|
||||
# set and check size
|
||||
raise ValueError(
|
||||
f"Index {ix} has mismatched sizes {sizes[ix]} and {d}."
|
||||
)
|
||||
left[ix] = True
|
||||
|
||||
# parse right term to unique indices with size > 1
|
||||
right = {}
|
||||
for ix, d in zip(b_term, shape_b):
|
||||
# broadcast indices (size 1 on one input and size != 1
|
||||
# on the other) should not be treated as singletons
|
||||
if d == 1:
|
||||
if ix not in left:
|
||||
singletons.add(ix)
|
||||
continue
|
||||
singletons.discard(ix)
|
||||
|
||||
if sizes.setdefault(ix, d) != d:
|
||||
# set and check size
|
||||
raise ValueError(
|
||||
f"Index {ix} has mismatched sizes {sizes[ix]} and {d}."
|
||||
)
|
||||
right[ix] = True
|
||||
|
||||
# now we classify the unique size > 1 indices only
|
||||
bat_inds = [] # appears on A, B, O
|
||||
con_inds = [] # appears on A, B, .
|
||||
a_keep = [] # appears on A, ., O
|
||||
b_keep = [] # appears on ., B, O
|
||||
# other indices (appearing on A or B only) will
|
||||
# be summed or traced out prior to the matmul
|
||||
for ix in left:
|
||||
if right.pop(ix, False):
|
||||
if ix in out:
|
||||
bat_inds.append(ix)
|
||||
else:
|
||||
con_inds.append(ix)
|
||||
elif ix in out:
|
||||
a_keep.append(ix)
|
||||
# now only indices unique to right remain
|
||||
for ix in right:
|
||||
if ix in out:
|
||||
b_keep.append(ix)
|
||||
|
||||
if not con_inds:
|
||||
# contraction is pure multiplication, prepare inputs differently
|
||||
return _parse_eq_to_pure_multiplication(
|
||||
a_term, shape_a, b_term, shape_b, out
|
||||
)
|
||||
|
||||
# only need the size one indices that appear in the output
|
||||
singletons = [ix for ix in out if ix in singletons]
|
||||
|
||||
# take diagonal, remove any trivial axes and transpose left
|
||||
desired_a = "".join((*bat_inds, *a_keep, *con_inds))
|
||||
if a_term != desired_a:
|
||||
eq_a = f"{a_term}->{desired_a}"
|
||||
else:
|
||||
eq_a = None
|
||||
|
||||
# take diagonal, remove any trivial axes and transpose right
|
||||
desired_b = "".join((*bat_inds, *con_inds, *b_keep))
|
||||
if b_term != desired_b:
|
||||
eq_b = f"{b_term}->{desired_b}"
|
||||
else:
|
||||
eq_b = None
|
||||
|
||||
# then we want to reshape
|
||||
if bat_inds:
|
||||
lgroups = (bat_inds, a_keep, con_inds)
|
||||
rgroups = (bat_inds, con_inds, b_keep)
|
||||
ogroups = (bat_inds, a_keep, b_keep)
|
||||
else:
|
||||
# avoid size 1 batch dimension if no batch indices
|
||||
lgroups = (a_keep, con_inds)
|
||||
rgroups = (con_inds, b_keep)
|
||||
ogroups = (a_keep, b_keep)
|
||||
|
||||
if any(len(group) != 1 for group in lgroups):
|
||||
# need to fuse 'kept' and contracted indices
|
||||
# (though could allow batch indices to be broadcast)
|
||||
new_shape_a = tuple(
|
||||
functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1)
|
||||
for ix_group in lgroups
|
||||
)
|
||||
else:
|
||||
new_shape_a = None
|
||||
|
||||
if any(len(group) != 1 for group in rgroups):
|
||||
# need to fuse 'kept' and contracted indices
|
||||
# (though could allow batch indices to be broadcast)
|
||||
new_shape_b = tuple(
|
||||
functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1)
|
||||
for ix_group in rgroups
|
||||
)
|
||||
else:
|
||||
new_shape_b = None
|
||||
|
||||
if any(len(group) != 1 for group in ogroups) or singletons:
|
||||
new_shape_ab = (1,) * len(singletons) + tuple(
|
||||
sizes[ix] for ix_group in ogroups for ix in ix_group
|
||||
)
|
||||
else:
|
||||
new_shape_ab = None
|
||||
|
||||
# then we might need to permute the matmul produced output:
|
||||
out_produced = "".join((*singletons, *bat_inds, *a_keep, *b_keep))
|
||||
if out_produced != out:
|
||||
perm_ab = tuple(out_produced.index(ix) for ix in out)
|
||||
else:
|
||||
perm_ab = None
|
||||
|
||||
return (
|
||||
eq_a,
|
||||
eq_b,
|
||||
new_shape_a,
|
||||
new_shape_b,
|
||||
new_shape_ab,
|
||||
perm_ab,
|
||||
False, # pure_multiplication=False
|
||||
)
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=64)
|
||||
def _parse_output_order(order, a_is_fcontig, b_is_fcontig):
|
||||
order = order.upper()
|
||||
if order == "K":
|
||||
return None
|
||||
elif order in "CF":
|
||||
return order
|
||||
elif order == "A":
|
||||
if a_is_fcontig and b_is_fcontig:
|
||||
return "F"
|
||||
else:
|
||||
return "C"
|
||||
else:
|
||||
raise ValueError(
|
||||
"ValueError: order must be one of "
|
||||
f"'C', 'F', 'A', or 'K' (got '{order}')"
|
||||
)
|
||||
|
||||
|
||||
def bmm_einsum(eq, a, b, out=None, **kwargs):
|
||||
"""Perform arbitrary pairwise einsums using only ``matmul``, or
|
||||
``multiply`` if no contracted indices are involved (plus maybe single term
|
||||
``einsum`` to prepare the terms individually). The logic for each is cached
|
||||
based on the equation and array shape, and each step is only performed if
|
||||
necessary.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
eq : str
|
||||
The einsum equation.
|
||||
a : array_like
|
||||
The first array to contract.
|
||||
b : array_like
|
||||
The second array to contract.
|
||||
|
||||
Returns
|
||||
-------
|
||||
array_like
|
||||
|
||||
Notes
|
||||
-----
|
||||
A fuller description of this algorithm, and original source for this
|
||||
implementation, can be found at https://github.com/jcmgray/einsum_bmm.
|
||||
"""
|
||||
(
|
||||
eq_a,
|
||||
eq_b,
|
||||
new_shape_a,
|
||||
new_shape_b,
|
||||
new_shape_ab,
|
||||
perm_ab,
|
||||
pure_multiplication,
|
||||
) = _parse_eq_to_batch_matmul(eq, a.shape, b.shape)
|
||||
|
||||
# n.b. one could special case various cases to call c_einsum directly here
|
||||
|
||||
# need to handle `order` a little manually, since we do transpose
|
||||
# operations before and potentially after the ufunc calls
|
||||
output_order = _parse_output_order(
|
||||
kwargs.pop("order", "K"), a.flags.f_contiguous, b.flags.f_contiguous
|
||||
)
|
||||
|
||||
# prepare left
|
||||
if eq_a is not None:
|
||||
# diagonals, sums, and tranpose
|
||||
a = c_einsum(eq_a, a)
|
||||
if new_shape_a is not None:
|
||||
a = reshape(a, new_shape_a)
|
||||
|
||||
# prepare right
|
||||
if eq_b is not None:
|
||||
# diagonals, sums, and tranpose
|
||||
b = c_einsum(eq_b, b)
|
||||
if new_shape_b is not None:
|
||||
b = reshape(b, new_shape_b)
|
||||
|
||||
if pure_multiplication:
|
||||
# no contracted indices
|
||||
if output_order is not None:
|
||||
kwargs["order"] = output_order
|
||||
|
||||
# do the 'contraction' via multiplication!
|
||||
return multiply(a, b, out=out, **kwargs)
|
||||
|
||||
# can only supply out here if no other reshaping / transposing
|
||||
matmul_out_compatible = (new_shape_ab is None) and (perm_ab is None)
|
||||
if matmul_out_compatible:
|
||||
kwargs["out"] = out
|
||||
|
||||
# do the contraction!
|
||||
ab = matmul(a, b, **kwargs)
|
||||
|
||||
# prepare the output
|
||||
if new_shape_ab is not None:
|
||||
ab = reshape(ab, new_shape_ab)
|
||||
if perm_ab is not None:
|
||||
ab = ab.transpose(perm_ab)
|
||||
|
||||
if (out is not None) and (not matmul_out_compatible):
|
||||
# handle case where out is specified, but we also needed
|
||||
# to reshape / transpose ``ab`` after the matmul
|
||||
out[:] = ab
|
||||
ab = out
|
||||
elif output_order is not None:
|
||||
ab = asanyarray(ab, order=output_order)
|
||||
|
||||
return ab
|
||||
|
||||
|
||||
def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
|
||||
# Arguably we dispatch on more arguments than we really should; see note in
|
||||
# _einsum_path_dispatcher for why.
|
||||
@@ -1434,58 +1621,23 @@ def einsum(*operands, out=None, optimize=False, **kwargs):
|
||||
operands, contraction_list = einsum_path(*operands, optimize=optimize,
|
||||
einsum_call=True)
|
||||
|
||||
# Handle order kwarg for output array, c_einsum allows mixed case
|
||||
output_order = kwargs.pop('order', 'K')
|
||||
if output_order.upper() == 'A':
|
||||
if all(arr.flags.f_contiguous for arr in operands):
|
||||
output_order = 'F'
|
||||
else:
|
||||
output_order = 'C'
|
||||
|
||||
# Start contraction loop
|
||||
for num, contraction in enumerate(contraction_list):
|
||||
inds, idx_rm, einsum_str, remaining, blas = contraction
|
||||
inds, einsum_str, _ = contraction
|
||||
tmp_operands = [operands.pop(x) for x in inds]
|
||||
|
||||
# Do we need to deal with the output?
|
||||
handle_out = specified_out and ((num + 1) == len(contraction_list))
|
||||
|
||||
# Call tensordot if still possible
|
||||
if blas:
|
||||
# Checks have already been handled
|
||||
input_str, results_index = einsum_str.split('->')
|
||||
input_left, input_right = input_str.split(',')
|
||||
# If out was specified
|
||||
if handle_out:
|
||||
kwargs["out"] = out
|
||||
|
||||
tensor_result = input_left + input_right
|
||||
for s in idx_rm:
|
||||
tensor_result = tensor_result.replace(s, "")
|
||||
|
||||
# Find indices to contract over
|
||||
left_pos, right_pos = [], []
|
||||
for s in sorted(idx_rm):
|
||||
left_pos.append(input_left.find(s))
|
||||
right_pos.append(input_right.find(s))
|
||||
|
||||
# Contract!
|
||||
new_view = tensordot(
|
||||
*tmp_operands, axes=(tuple(left_pos), tuple(right_pos))
|
||||
)
|
||||
|
||||
# Build a new view if needed
|
||||
if (tensor_result != results_index) or handle_out:
|
||||
if handle_out:
|
||||
kwargs["out"] = out
|
||||
new_view = c_einsum(
|
||||
tensor_result + '->' + results_index, new_view, **kwargs
|
||||
)
|
||||
|
||||
# Call einsum
|
||||
if len(tmp_operands) == 2:
|
||||
# Call (batched) matrix multiplication if possible
|
||||
new_view = bmm_einsum(einsum_str, *tmp_operands, **kwargs)
|
||||
else:
|
||||
# If out was specified
|
||||
if handle_out:
|
||||
kwargs["out"] = out
|
||||
|
||||
# Do the contraction
|
||||
# Call einsum
|
||||
new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
|
||||
|
||||
# Append new items and dereference what we can
|
||||
@@ -1495,4 +1647,4 @@ def einsum(*operands, out=None, optimize=False, **kwargs):
|
||||
if specified_out:
|
||||
return out
|
||||
else:
|
||||
return asanyarray(operands[0], order=output_order)
|
||||
return operands[0]
|
||||
|
||||
@@ -42,55 +42,55 @@ def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeBool_co,
|
||||
out: None = ...,
|
||||
out: None = None,
|
||||
dtype: _DTypeLikeBool | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeUInt_co,
|
||||
out: None = ...,
|
||||
out: None = None,
|
||||
dtype: _DTypeLikeUInt | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeInt_co,
|
||||
out: None = ...,
|
||||
out: None = None,
|
||||
dtype: _DTypeLikeInt | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeFloat_co,
|
||||
out: None = ...,
|
||||
out: None = None,
|
||||
dtype: _DTypeLikeFloat | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeComplex_co,
|
||||
out: None = ...,
|
||||
out: None = None,
|
||||
dtype: _DTypeLikeComplex | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
@@ -99,9 +99,9 @@ def einsum(
|
||||
*operands: Any,
|
||||
casting: _CastingUnsafe,
|
||||
dtype: _DTypeLikeComplex_co | None = ...,
|
||||
out: None = ...,
|
||||
out: None = None,
|
||||
order: _OrderKACF = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
@@ -112,7 +112,7 @@ def einsum(
|
||||
dtype: _DTypeLikeComplex_co | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> _ArrayT: ...
|
||||
@overload
|
||||
def einsum(
|
||||
@@ -123,7 +123,7 @@ def einsum(
|
||||
casting: _CastingUnsafe,
|
||||
dtype: _DTypeLikeComplex_co | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> _ArrayT: ...
|
||||
|
||||
@overload
|
||||
@@ -131,11 +131,11 @@ def einsum(
|
||||
subscripts: str | _ArrayLikeInt_co,
|
||||
/,
|
||||
*operands: _ArrayLikeObject_co,
|
||||
out: None = ...,
|
||||
out: None = None,
|
||||
dtype: _DTypeLikeObject | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
@@ -144,9 +144,9 @@ def einsum(
|
||||
*operands: Any,
|
||||
casting: _CastingUnsafe,
|
||||
dtype: _DTypeLikeObject | None = ...,
|
||||
out: None = ...,
|
||||
out: None = None,
|
||||
order: _OrderKACF = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> Any: ...
|
||||
@overload
|
||||
def einsum(
|
||||
@@ -157,7 +157,7 @@ def einsum(
|
||||
dtype: _DTypeLikeObject | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
casting: _CastingSafe = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> _ArrayT: ...
|
||||
@overload
|
||||
def einsum(
|
||||
@@ -168,7 +168,7 @@ def einsum(
|
||||
casting: _CastingUnsafe,
|
||||
dtype: _DTypeLikeObject | None = ...,
|
||||
order: _OrderKACF = ...,
|
||||
optimize: _OptimizeKind = ...,
|
||||
optimize: _OptimizeKind = False,
|
||||
) -> _ArrayT: ...
|
||||
|
||||
# NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
|
||||
|
||||
@@ -2,16 +2,13 @@
|
||||
|
||||
"""
|
||||
import functools
|
||||
import math
|
||||
import types
|
||||
import warnings
|
||||
|
||||
import numpy as np
|
||||
from numpy._utils import set_module
|
||||
|
||||
from . import _methods, overrides
|
||||
from . import multiarray as mu
|
||||
from . import numerictypes as nt
|
||||
from . import umath as um
|
||||
from . import _methods, multiarray as mu, numerictypes as nt, overrides, umath as um
|
||||
from ._multiarray_umath import _array_converter
|
||||
from .multiarray import asanyarray, asarray, concatenate
|
||||
|
||||
@@ -172,7 +169,7 @@ def take(a, indices, axis=None, out=None, mode='raise'):
|
||||
|
||||
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
|
||||
for ii in ndindex(Ni):
|
||||
for kk in ndindex(Nj):
|
||||
for kk in ndindex(Nk):
|
||||
out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
|
||||
|
||||
For this reason, it is equivalent to (but faster than) the following use
|
||||
@@ -203,13 +200,12 @@ def take(a, indices, axis=None, out=None, mode='raise'):
|
||||
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
|
||||
|
||||
|
||||
def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None,
|
||||
copy=None):
|
||||
def _reshape_dispatcher(a, /, shape, order=None, *, copy=None):
|
||||
return (a,)
|
||||
|
||||
|
||||
@array_function_dispatch(_reshape_dispatcher)
|
||||
def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None):
|
||||
def reshape(a, /, shape, order='C', *, copy=None):
|
||||
"""
|
||||
Gives a new shape to an array without changing its data.
|
||||
|
||||
@@ -235,10 +231,6 @@ def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None):
|
||||
'A' means to read / write the elements in Fortran-like index
|
||||
order if ``a`` is Fortran *contiguous* in memory, C-like order
|
||||
otherwise.
|
||||
newshape : int or tuple of ints
|
||||
.. deprecated:: 2.1
|
||||
Replaced by ``shape`` argument. Retained for backward
|
||||
compatibility.
|
||||
copy : bool, optional
|
||||
If ``True``, then the array data is copied. If ``None``, a copy will
|
||||
only be made if it's required by ``order``. For ``False`` it raises
|
||||
@@ -302,23 +294,6 @@ def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None):
|
||||
[3, 4],
|
||||
[5, 6]])
|
||||
"""
|
||||
if newshape is None and shape is None:
|
||||
raise TypeError(
|
||||
"reshape() missing 1 required positional argument: 'shape'")
|
||||
if newshape is not None:
|
||||
if shape is not None:
|
||||
raise TypeError(
|
||||
"You cannot specify 'newshape' and 'shape' arguments "
|
||||
"at the same time.")
|
||||
# Deprecated in NumPy 2.1, 2024-04-18
|
||||
warnings.warn(
|
||||
"`newshape` keyword argument is deprecated, "
|
||||
"use `shape=...` or pass shape positionally instead. "
|
||||
"(deprecated in NumPy 2.1)",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
shape = newshape
|
||||
if copy is not None:
|
||||
return _wrapfunc(a, 'reshape', shape, order=order, copy=copy)
|
||||
return _wrapfunc(a, 'reshape', shape, order=order)
|
||||
@@ -779,8 +754,6 @@ def partition(a, kth, axis=-1, kind='introselect', order=None):
|
||||
provided with a sequence of k-th it will partition all elements
|
||||
indexed by k-th of them into their sorted position at once.
|
||||
|
||||
.. deprecated:: 1.22.0
|
||||
Passing booleans as index is deprecated.
|
||||
axis : int or None, optional
|
||||
Axis along which to sort. If None, the array is flattened before
|
||||
sorting. The default is -1, which sorts along the last axis.
|
||||
@@ -892,8 +865,6 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
|
||||
sequence of k-th it will partition all of them into their sorted
|
||||
position at once.
|
||||
|
||||
.. deprecated:: 1.22.0
|
||||
Passing booleans as index is deprecated.
|
||||
axis : int or None, optional
|
||||
Axis along which to sort. The default is -1 (the last axis). If
|
||||
None, the flattened array is used.
|
||||
@@ -1306,6 +1277,8 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue):
|
||||
|
||||
Indexes of the maximal elements of a N-dimensional array:
|
||||
|
||||
>>> a.flat[np.argmax(a)]
|
||||
15
|
||||
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
|
||||
>>> ind
|
||||
(1, 2)
|
||||
@@ -1404,6 +1377,8 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue):
|
||||
|
||||
Indices of the minimum elements of a N-dimensional array:
|
||||
|
||||
>>> a.flat[np.argmin(a)]
|
||||
10
|
||||
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
|
||||
>>> ind
|
||||
(0, 0)
|
||||
@@ -2028,15 +2003,6 @@ def nonzero(a):
|
||||
To group the indices by element, rather than dimension, use `argwhere`,
|
||||
which returns a row for each non-zero element.
|
||||
|
||||
.. note::
|
||||
|
||||
When called on a zero-d array or scalar, ``nonzero(a)`` is treated
|
||||
as ``nonzero(atleast_1d(a))``.
|
||||
|
||||
.. deprecated:: 1.17.0
|
||||
|
||||
Use `atleast_1d` explicitly if this behavior is deliberate.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
a : array_like
|
||||
@@ -2450,19 +2416,12 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
|
||||
"""
|
||||
if isinstance(a, _gentype):
|
||||
# 2018-02-25, 1.15.0
|
||||
warnings.warn(
|
||||
"Calling np.sum(generator) is deprecated, and in the future will "
|
||||
"give a different result. Use np.sum(np.fromiter(generator)) or "
|
||||
raise TypeError(
|
||||
"Calling np.sum(generator) is deprecated."
|
||||
"Use np.sum(np.fromiter(generator)) or "
|
||||
"the python sum builtin instead.",
|
||||
DeprecationWarning, stacklevel=2
|
||||
)
|
||||
|
||||
res = _sum_(a)
|
||||
if out is not None:
|
||||
out[...] = res
|
||||
return out
|
||||
return res
|
||||
|
||||
return _wrapreduction(
|
||||
a, np.add, 'sum', axis, dtype, out,
|
||||
keepdims=keepdims, initial=initial, where=where
|
||||
@@ -3572,10 +3531,13 @@ def size(a, axis=None):
|
||||
----------
|
||||
a : array_like
|
||||
Input data.
|
||||
axis : int, optional
|
||||
Axis along which the elements are counted. By default, give
|
||||
axis : None or int or tuple of ints, optional
|
||||
Axis or axes along which the elements are counted. By default, give
|
||||
the total number of elements.
|
||||
|
||||
.. versionchanged:: 2.4
|
||||
Extended to accept multiple axes.
|
||||
|
||||
Returns
|
||||
-------
|
||||
element_count : int
|
||||
@@ -3593,10 +3555,12 @@ def size(a, axis=None):
|
||||
>>> a = np.array([[1,2,3],[4,5,6]])
|
||||
>>> np.size(a)
|
||||
6
|
||||
>>> np.size(a,1)
|
||||
>>> np.size(a,axis=1)
|
||||
3
|
||||
>>> np.size(a,0)
|
||||
>>> np.size(a,axis=0)
|
||||
2
|
||||
>>> np.size(a,axis=(0,1))
|
||||
6
|
||||
|
||||
"""
|
||||
if axis is None:
|
||||
@@ -3605,10 +3569,10 @@ def size(a, axis=None):
|
||||
except AttributeError:
|
||||
return asarray(a).size
|
||||
else:
|
||||
try:
|
||||
return a.shape[axis]
|
||||
except AttributeError:
|
||||
return asarray(a).shape[axis]
|
||||
_shape = shape(a)
|
||||
from .numeric import normalize_axis_tuple
|
||||
axis = normalize_axis_tuple(axis, len(_shape), allow_duplicate=False)
|
||||
return math.prod(_shape[ax] for ax in axis)
|
||||
|
||||
|
||||
def _round_dispatcher(a, decimals=None, out=None):
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,5 @@
|
||||
import functools
|
||||
import inspect
|
||||
import operator
|
||||
import types
|
||||
import warnings
|
||||
@@ -477,6 +478,9 @@ def _add_docstring(obj, doc, warn_on_python):
|
||||
"Prefer to attach it directly to the source.",
|
||||
UserWarning,
|
||||
stacklevel=3)
|
||||
|
||||
doc = inspect.cleandoc(doc)
|
||||
|
||||
try:
|
||||
add_docstring(obj, doc)
|
||||
except Exception:
|
||||
@@ -494,10 +498,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True):
|
||||
----------
|
||||
place : str
|
||||
The absolute name of the module to import from
|
||||
obj : str or None
|
||||
obj : str | None
|
||||
The name of the object to add documentation to, typically a class or
|
||||
function name.
|
||||
doc : {str, Tuple[str, str], List[Tuple[str, str]]}
|
||||
doc : str | tuple[str, str] | list[tuple[str, str]]
|
||||
If a string, the documentation to apply to `obj`
|
||||
|
||||
If a tuple, then the first element is interpreted as an attribute
|
||||
@@ -534,12 +538,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True):
|
||||
if isinstance(doc, str):
|
||||
if "${ARRAY_FUNCTION_LIKE}" in doc:
|
||||
doc = overrides.get_array_function_like_doc(new, doc)
|
||||
_add_docstring(new, doc.strip(), warn_on_python)
|
||||
_add_docstring(new, doc, warn_on_python)
|
||||
elif isinstance(doc, tuple):
|
||||
attr, docstring = doc
|
||||
_add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
|
||||
_add_docstring(getattr(new, attr), docstring, warn_on_python)
|
||||
elif isinstance(doc, list):
|
||||
for attr, docstring in doc:
|
||||
_add_docstring(
|
||||
getattr(new, attr), docstring.strip(), warn_on_python
|
||||
)
|
||||
_add_docstring(getattr(new, attr), docstring, warn_on_python)
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
from typing import Literal as L
|
||||
from typing import SupportsIndex, TypeAlias, TypeVar, overload
|
||||
|
||||
from _typeshed import Incomplete
|
||||
from typing import Literal as L, SupportsIndex, TypeAlias, TypeVar, overload
|
||||
|
||||
import numpy as np
|
||||
from numpy._typing import (
|
||||
|
||||
@@ -3,16 +3,15 @@
|
||||
"""
|
||||
__all__ = ['finfo', 'iinfo']
|
||||
|
||||
import math
|
||||
import types
|
||||
import warnings
|
||||
from functools import cached_property
|
||||
|
||||
from numpy._utils import set_module
|
||||
|
||||
from . import numeric
|
||||
from . import numerictypes as ntypes
|
||||
from ._machar import MachAr
|
||||
from .numeric import array, inf, nan
|
||||
from .umath import exp2, isnan, log10, nextafter
|
||||
from . import numeric, numerictypes as ntypes
|
||||
from ._multiarray_umath import _populate_finfo_constants
|
||||
|
||||
|
||||
def _fr0(a):
|
||||
@@ -31,96 +30,6 @@ def _fr1(a):
|
||||
return a
|
||||
|
||||
|
||||
class MachArLike:
|
||||
""" Object to simulate MachAr instance """
|
||||
def __init__(self, ftype, *, eps, epsneg, huge, tiny,
|
||||
ibeta, smallest_subnormal=None, **kwargs):
|
||||
self.params = _MACHAR_PARAMS[ftype]
|
||||
self.ftype = ftype
|
||||
self.title = self.params['title']
|
||||
# Parameter types same as for discovered MachAr object.
|
||||
if not smallest_subnormal:
|
||||
self._smallest_subnormal = nextafter(
|
||||
self.ftype(0), self.ftype(1), dtype=self.ftype)
|
||||
else:
|
||||
self._smallest_subnormal = smallest_subnormal
|
||||
self.epsilon = self.eps = self._float_to_float(eps)
|
||||
self.epsneg = self._float_to_float(epsneg)
|
||||
self.xmax = self.huge = self._float_to_float(huge)
|
||||
self.xmin = self._float_to_float(tiny)
|
||||
self.smallest_normal = self.tiny = self._float_to_float(tiny)
|
||||
self.ibeta = self.params['itype'](ibeta)
|
||||
self.__dict__.update(kwargs)
|
||||
self.precision = int(-log10(self.eps))
|
||||
self.resolution = self._float_to_float(
|
||||
self._float_conv(10) ** (-self.precision))
|
||||
self._str_eps = self._float_to_str(self.eps)
|
||||
self._str_epsneg = self._float_to_str(self.epsneg)
|
||||
self._str_xmin = self._float_to_str(self.xmin)
|
||||
self._str_xmax = self._float_to_str(self.xmax)
|
||||
self._str_resolution = self._float_to_str(self.resolution)
|
||||
self._str_smallest_normal = self._float_to_str(self.xmin)
|
||||
|
||||
@property
|
||||
def smallest_subnormal(self):
|
||||
"""Return the value for the smallest subnormal.
|
||||
|
||||
Returns
|
||||
-------
|
||||
smallest_subnormal : float
|
||||
value for the smallest subnormal.
|
||||
|
||||
Warns
|
||||
-----
|
||||
UserWarning
|
||||
If the calculated value for the smallest subnormal is zero.
|
||||
"""
|
||||
# Check that the calculated value is not zero, in case it raises a
|
||||
# warning.
|
||||
value = self._smallest_subnormal
|
||||
if self.ftype(0) == value:
|
||||
warnings.warn(
|
||||
f'The value of the smallest subnormal for {self.ftype} type is zero.',
|
||||
UserWarning, stacklevel=2)
|
||||
|
||||
return self._float_to_float(value)
|
||||
|
||||
@property
|
||||
def _str_smallest_subnormal(self):
|
||||
"""Return the string representation of the smallest subnormal."""
|
||||
return self._float_to_str(self.smallest_subnormal)
|
||||
|
||||
def _float_to_float(self, value):
|
||||
"""Converts float to float.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
value : float
|
||||
value to be converted.
|
||||
"""
|
||||
return _fr1(self._float_conv(value))
|
||||
|
||||
def _float_conv(self, value):
|
||||
"""Converts float to conv.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
value : float
|
||||
value to be converted.
|
||||
"""
|
||||
return array([value], self.ftype)
|
||||
|
||||
def _float_to_str(self, value):
|
||||
"""Converts float to str.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
value : float
|
||||
value to be converted.
|
||||
"""
|
||||
return self.params['fmt'] % array(_fr0(value)[0], self.ftype)
|
||||
|
||||
|
||||
_convert_to_float = {
|
||||
ntypes.csingle: ntypes.single,
|
||||
ntypes.complex128: ntypes.float64,
|
||||
@@ -147,240 +56,6 @@ _MACHAR_PARAMS = {
|
||||
'fmt': '%12.5e',
|
||||
'title': _title_fmt.format('half')}}
|
||||
|
||||
# Key to identify the floating point type. Key is result of
|
||||
#
|
||||
# ftype = np.longdouble # or float64, float32, etc.
|
||||
# v = (ftype(-1.0) / ftype(10.0))
|
||||
# v.view(v.dtype.newbyteorder('<')).tobytes()
|
||||
#
|
||||
# Uses division to work around deficiencies in strtold on some platforms.
|
||||
# See:
|
||||
# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
|
||||
|
||||
_KNOWN_TYPES = {}
|
||||
def _register_type(machar, bytepat):
|
||||
_KNOWN_TYPES[bytepat] = machar
|
||||
|
||||
|
||||
_float_ma = {}
|
||||
|
||||
|
||||
def _register_known_types():
|
||||
# Known parameters for float16
|
||||
# See docstring of MachAr class for description of parameters.
|
||||
f16 = ntypes.float16
|
||||
float16_ma = MachArLike(f16,
|
||||
machep=-10,
|
||||
negep=-11,
|
||||
minexp=-14,
|
||||
maxexp=16,
|
||||
it=10,
|
||||
iexp=5,
|
||||
ibeta=2,
|
||||
irnd=5,
|
||||
ngrd=0,
|
||||
eps=exp2(f16(-10)),
|
||||
epsneg=exp2(f16(-11)),
|
||||
huge=f16(65504),
|
||||
tiny=f16(2 ** -14))
|
||||
_register_type(float16_ma, b'f\xae')
|
||||
_float_ma[16] = float16_ma
|
||||
|
||||
# Known parameters for float32
|
||||
f32 = ntypes.float32
|
||||
float32_ma = MachArLike(f32,
|
||||
machep=-23,
|
||||
negep=-24,
|
||||
minexp=-126,
|
||||
maxexp=128,
|
||||
it=23,
|
||||
iexp=8,
|
||||
ibeta=2,
|
||||
irnd=5,
|
||||
ngrd=0,
|
||||
eps=exp2(f32(-23)),
|
||||
epsneg=exp2(f32(-24)),
|
||||
huge=f32((1 - 2 ** -24) * 2**128),
|
||||
tiny=exp2(f32(-126)))
|
||||
_register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
|
||||
_float_ma[32] = float32_ma
|
||||
|
||||
# Known parameters for float64
|
||||
f64 = ntypes.float64
|
||||
epsneg_f64 = 2.0 ** -53.0
|
||||
tiny_f64 = 2.0 ** -1022.0
|
||||
float64_ma = MachArLike(f64,
|
||||
machep=-52,
|
||||
negep=-53,
|
||||
minexp=-1022,
|
||||
maxexp=1024,
|
||||
it=52,
|
||||
iexp=11,
|
||||
ibeta=2,
|
||||
irnd=5,
|
||||
ngrd=0,
|
||||
eps=2.0 ** -52.0,
|
||||
epsneg=epsneg_f64,
|
||||
huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
|
||||
tiny=tiny_f64)
|
||||
_register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
|
||||
_float_ma[64] = float64_ma
|
||||
|
||||
# Known parameters for IEEE 754 128-bit binary float
|
||||
ld = ntypes.longdouble
|
||||
epsneg_f128 = exp2(ld(-113))
|
||||
tiny_f128 = exp2(ld(-16382))
|
||||
# Ignore runtime error when this is not f128
|
||||
with numeric.errstate(all='ignore'):
|
||||
huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
|
||||
float128_ma = MachArLike(ld,
|
||||
machep=-112,
|
||||
negep=-113,
|
||||
minexp=-16382,
|
||||
maxexp=16384,
|
||||
it=112,
|
||||
iexp=15,
|
||||
ibeta=2,
|
||||
irnd=5,
|
||||
ngrd=0,
|
||||
eps=exp2(ld(-112)),
|
||||
epsneg=epsneg_f128,
|
||||
huge=huge_f128,
|
||||
tiny=tiny_f128)
|
||||
# IEEE 754 128-bit binary float
|
||||
_register_type(float128_ma,
|
||||
b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
|
||||
_float_ma[128] = float128_ma
|
||||
|
||||
# Known parameters for float80 (Intel 80-bit extended precision)
|
||||
epsneg_f80 = exp2(ld(-64))
|
||||
tiny_f80 = exp2(ld(-16382))
|
||||
# Ignore runtime error when this is not f80
|
||||
with numeric.errstate(all='ignore'):
|
||||
huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
|
||||
float80_ma = MachArLike(ld,
|
||||
machep=-63,
|
||||
negep=-64,
|
||||
minexp=-16382,
|
||||
maxexp=16384,
|
||||
it=63,
|
||||
iexp=15,
|
||||
ibeta=2,
|
||||
irnd=5,
|
||||
ngrd=0,
|
||||
eps=exp2(ld(-63)),
|
||||
epsneg=epsneg_f80,
|
||||
huge=huge_f80,
|
||||
tiny=tiny_f80)
|
||||
# float80, first 10 bytes containing actual storage
|
||||
_register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
|
||||
_float_ma[80] = float80_ma
|
||||
|
||||
# Guessed / known parameters for double double; see:
|
||||
# https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
|
||||
# These numbers have the same exponent range as float64, but extended
|
||||
# number of digits in the significand.
|
||||
huge_dd = nextafter(ld(inf), ld(0), dtype=ld)
|
||||
# As the smallest_normal in double double is so hard to calculate we set
|
||||
# it to NaN.
|
||||
smallest_normal_dd = nan
|
||||
# Leave the same value for the smallest subnormal as double
|
||||
smallest_subnormal_dd = ld(nextafter(0., 1.))
|
||||
float_dd_ma = MachArLike(ld,
|
||||
machep=-105,
|
||||
negep=-106,
|
||||
minexp=-1022,
|
||||
maxexp=1024,
|
||||
it=105,
|
||||
iexp=11,
|
||||
ibeta=2,
|
||||
irnd=5,
|
||||
ngrd=0,
|
||||
eps=exp2(ld(-105)),
|
||||
epsneg=exp2(ld(-106)),
|
||||
huge=huge_dd,
|
||||
tiny=smallest_normal_dd,
|
||||
smallest_subnormal=smallest_subnormal_dd)
|
||||
# double double; low, high order (e.g. PPC 64)
|
||||
_register_type(float_dd_ma,
|
||||
b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
|
||||
# double double; high, low order (e.g. PPC 64 le)
|
||||
_register_type(float_dd_ma,
|
||||
b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
|
||||
_float_ma['dd'] = float_dd_ma
|
||||
|
||||
|
||||
def _get_machar(ftype):
|
||||
""" Get MachAr instance or MachAr-like instance
|
||||
|
||||
Get parameters for floating point type, by first trying signatures of
|
||||
various known floating point types, then, if none match, attempting to
|
||||
identify parameters by analysis.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ftype : class
|
||||
Numpy floating point type class (e.g. ``np.float64``)
|
||||
|
||||
Returns
|
||||
-------
|
||||
ma_like : instance of :class:`MachAr` or :class:`MachArLike`
|
||||
Object giving floating point parameters for `ftype`.
|
||||
|
||||
Warns
|
||||
-----
|
||||
UserWarning
|
||||
If the binary signature of the float type is not in the dictionary of
|
||||
known float types.
|
||||
"""
|
||||
params = _MACHAR_PARAMS.get(ftype)
|
||||
if params is None:
|
||||
raise ValueError(repr(ftype))
|
||||
# Detect known / suspected types
|
||||
# ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold
|
||||
# may be deficient
|
||||
key = (ftype(-1.0) / ftype(10.))
|
||||
key = key.view(key.dtype.newbyteorder("<")).tobytes()
|
||||
ma_like = None
|
||||
if ftype == ntypes.longdouble:
|
||||
# Could be 80 bit == 10 byte extended precision, where last bytes can
|
||||
# be random garbage.
|
||||
# Comparing first 10 bytes to pattern first to avoid branching on the
|
||||
# random garbage.
|
||||
ma_like = _KNOWN_TYPES.get(key[:10])
|
||||
if ma_like is None:
|
||||
# see if the full key is known.
|
||||
ma_like = _KNOWN_TYPES.get(key)
|
||||
if ma_like is None and len(key) == 16:
|
||||
# machine limits could be f80 masquerading as np.float128,
|
||||
# find all keys with length 16 and make new dict, but make the keys
|
||||
# only 10 bytes long, the last bytes can be random garbage
|
||||
_kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16}
|
||||
ma_like = _kt.get(key[:10])
|
||||
if ma_like is not None:
|
||||
return ma_like
|
||||
# Fall back to parameter discovery
|
||||
warnings.warn(
|
||||
f'Signature {key} for {ftype} does not match any known type: '
|
||||
'falling back to type probe function.\n'
|
||||
'This warnings indicates broken support for the dtype!',
|
||||
UserWarning, stacklevel=2)
|
||||
return _discovered_machar(ftype)
|
||||
|
||||
|
||||
def _discovered_machar(ftype):
|
||||
""" Create MachAr instance with found information on float types
|
||||
|
||||
TODO: MachAr should be retired completely ideally. We currently only
|
||||
ever use it system with broken longdouble (valgrind, WSL).
|
||||
"""
|
||||
params = _MACHAR_PARAMS[ftype]
|
||||
return MachAr(lambda v: array([v], ftype),
|
||||
lambda v: _fr0(v.astype(params['itype']))[0],
|
||||
lambda v: array(_fr0(v)[0], ftype),
|
||||
lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
|
||||
params['title'])
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
class finfo:
|
||||
@@ -414,17 +89,20 @@ class finfo:
|
||||
The largest representable number.
|
||||
maxexp : int
|
||||
The smallest positive power of the base (2) that causes overflow.
|
||||
Corresponds to the C standard MAX_EXP.
|
||||
min : floating point number of the appropriate type
|
||||
The smallest representable number, typically ``-max``.
|
||||
minexp : int
|
||||
The most negative power of the base (2) consistent with there
|
||||
being no leading 0's in the mantissa.
|
||||
being no leading 0's in the mantissa. Corresponds to the C
|
||||
standard MIN_EXP - 1.
|
||||
negep : int
|
||||
The exponent that yields `epsneg`.
|
||||
nexp : int
|
||||
The number of bits in the exponent including its sign and bias.
|
||||
nmant : int
|
||||
The number of bits in the mantissa.
|
||||
The number of explicit bits in the mantissa (excluding the implicit
|
||||
leading bit for normalized numbers).
|
||||
precision : int
|
||||
The approximate number of decimal digits to which this kind of
|
||||
float is precise.
|
||||
@@ -465,6 +143,12 @@ class finfo:
|
||||
fill the gap between 0 and ``smallest_normal``. However, subnormal numbers
|
||||
may have significantly reduced precision [2]_.
|
||||
|
||||
For ``longdouble``, the representation varies across platforms. On most
|
||||
platforms it is IEEE 754 binary128 (quad precision) or binary64-extended
|
||||
(80-bit extended precision). On PowerPC systems, it may use the IBM
|
||||
double-double format (a pair of float64 values), which has special
|
||||
characteristics for precision and range.
|
||||
|
||||
This function can also be used for complex data types as well. If used,
|
||||
the output will be the same as the corresponding real float type
|
||||
(e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)).
|
||||
@@ -549,77 +233,107 @@ class finfo:
|
||||
|
||||
def _init(self, dtype):
|
||||
self.dtype = numeric.dtype(dtype)
|
||||
machar = _get_machar(dtype)
|
||||
|
||||
for word in ['precision', 'iexp',
|
||||
'maxexp', 'minexp', 'negep',
|
||||
'machep']:
|
||||
setattr(self, word, getattr(machar, word))
|
||||
for word in ['resolution', 'epsneg', 'smallest_subnormal']:
|
||||
setattr(self, word, getattr(machar, word).flat[0])
|
||||
self.bits = self.dtype.itemsize * 8
|
||||
self.max = machar.huge.flat[0]
|
||||
self.min = -self.max
|
||||
self.eps = machar.eps.flat[0]
|
||||
self.nexp = machar.iexp
|
||||
self.nmant = machar.it
|
||||
self._machar = machar
|
||||
self._str_tiny = machar._str_xmin.strip()
|
||||
self._str_max = machar._str_xmax.strip()
|
||||
self._str_epsneg = machar._str_epsneg.strip()
|
||||
self._str_eps = machar._str_eps.strip()
|
||||
self._str_resolution = machar._str_resolution.strip()
|
||||
self._str_smallest_normal = machar._str_smallest_normal.strip()
|
||||
self._str_smallest_subnormal = machar._str_smallest_subnormal.strip()
|
||||
self._fmt = None
|
||||
self._repr = None
|
||||
_populate_finfo_constants(self, self.dtype)
|
||||
return self
|
||||
|
||||
@cached_property
|
||||
def epsneg(self):
|
||||
# Assume typical floating point logic. Could also use nextafter.
|
||||
return self.eps / self._radix
|
||||
|
||||
@cached_property
|
||||
def resolution(self):
|
||||
return self.dtype.type(10)**-self.precision
|
||||
|
||||
@cached_property
|
||||
def machep(self):
|
||||
return int(math.log2(self.eps))
|
||||
|
||||
@cached_property
|
||||
def negep(self):
|
||||
return int(math.log2(self.epsneg))
|
||||
|
||||
@cached_property
|
||||
def nexp(self):
|
||||
# considering all ones (inf/nan) and all zeros (subnormal/zero)
|
||||
return math.ceil(math.log2(self.maxexp - self.minexp + 2))
|
||||
|
||||
@cached_property
|
||||
def iexp(self):
|
||||
# Calculate exponent bits from it's range:
|
||||
return math.ceil(math.log2(self.maxexp - self.minexp))
|
||||
|
||||
def __str__(self):
|
||||
if (fmt := getattr(self, "_fmt", None)) is not None:
|
||||
return fmt
|
||||
|
||||
def get_str(name, pad=None):
|
||||
if (val := getattr(self, name, None)) is None:
|
||||
return "<undefined>"
|
||||
if pad is not None:
|
||||
s = str(val).ljust(pad)
|
||||
return str(val)
|
||||
|
||||
precision = get_str("precision", 3)
|
||||
machep = get_str("machep", 6)
|
||||
negep = get_str("negep", 6)
|
||||
minexp = get_str("minexp", 6)
|
||||
maxexp = get_str("maxexp", 6)
|
||||
resolution = get_str("resolution")
|
||||
eps = get_str("eps")
|
||||
epsneg = get_str("epsneg")
|
||||
tiny = get_str("tiny")
|
||||
smallest_normal = get_str("smallest_normal")
|
||||
smallest_subnormal = get_str("smallest_subnormal")
|
||||
nexp = get_str("nexp", 6)
|
||||
max_ = get_str("max")
|
||||
if hasattr(self, "min") and hasattr(self, "max") and -self.min == self.max:
|
||||
min_ = "-max"
|
||||
else:
|
||||
min_ = get_str("min")
|
||||
|
||||
fmt = (
|
||||
'Machine parameters for %(dtype)s\n'
|
||||
'---------------------------------------------------------------\n'
|
||||
'precision = %(precision)3s resolution = %(_str_resolution)s\n'
|
||||
'machep = %(machep)6s eps = %(_str_eps)s\n'
|
||||
'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
|
||||
'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
|
||||
'maxexp = %(maxexp)6s max = %(_str_max)s\n'
|
||||
'nexp = %(nexp)6s min = -max\n'
|
||||
'smallest_normal = %(_str_smallest_normal)s '
|
||||
'smallest_subnormal = %(_str_smallest_subnormal)s\n'
|
||||
'---------------------------------------------------------------\n'
|
||||
)
|
||||
return fmt % self.__dict__
|
||||
f'Machine parameters for {self.dtype}\n'
|
||||
f'---------------------------------------------------------------\n'
|
||||
f'precision = {precision} resolution = {resolution}\n'
|
||||
f'machep = {machep} eps = {eps}\n'
|
||||
f'negep = {negep} epsneg = {epsneg}\n'
|
||||
f'minexp = {minexp} tiny = {tiny}\n'
|
||||
f'maxexp = {maxexp} max = {max_}\n'
|
||||
f'nexp = {nexp} min = {min_}\n'
|
||||
f'smallest_normal = {smallest_normal} '
|
||||
f'smallest_subnormal = {smallest_subnormal}\n'
|
||||
f'---------------------------------------------------------------\n'
|
||||
)
|
||||
self._fmt = fmt
|
||||
return fmt
|
||||
|
||||
def __repr__(self):
|
||||
if (repr_str := getattr(self, "_repr", None)) is not None:
|
||||
return repr_str
|
||||
|
||||
c = self.__class__.__name__
|
||||
d = self.__dict__.copy()
|
||||
d['klass'] = c
|
||||
return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
|
||||
" max=%(_str_max)s, dtype=%(dtype)s)") % d)
|
||||
|
||||
@property
|
||||
def smallest_normal(self):
|
||||
"""Return the value for the smallest normal.
|
||||
# Use precision+1 digits in exponential notation
|
||||
fmt_str = _MACHAR_PARAMS.get(self.dtype.type, {}).get('fmt', '%s')
|
||||
if fmt_str != '%s' and hasattr(self, 'max') and hasattr(self, 'min'):
|
||||
max_str = (fmt_str % self.max).strip()
|
||||
min_str = (fmt_str % self.min).strip()
|
||||
else:
|
||||
max_str = str(self.max)
|
||||
min_str = str(self.min)
|
||||
|
||||
Returns
|
||||
-------
|
||||
smallest_normal : float
|
||||
Value for the smallest normal.
|
||||
resolution_str = str(self.resolution)
|
||||
|
||||
Warns
|
||||
-----
|
||||
UserWarning
|
||||
If the calculated value for the smallest normal is requested for
|
||||
double-double.
|
||||
"""
|
||||
# This check is necessary because the value for smallest_normal is
|
||||
# platform dependent for longdouble types.
|
||||
if isnan(self._machar.smallest_normal.flat[0]):
|
||||
warnings.warn(
|
||||
'The value of smallest normal is undefined for double double',
|
||||
UserWarning, stacklevel=2)
|
||||
return self._machar.smallest_normal.flat[0]
|
||||
repr_str = (f"{c}(resolution={resolution_str}, min={min_str},"
|
||||
f" max={max_str}, dtype={self.dtype})")
|
||||
self._repr = repr_str
|
||||
return repr_str
|
||||
|
||||
@property
|
||||
@cached_property
|
||||
def tiny(self):
|
||||
"""Return the value for tiny, alias of smallest_normal.
|
||||
|
||||
|
||||
@@ -1,3 +1,124 @@
|
||||
from numpy import finfo, iinfo
|
||||
from functools import cached_property
|
||||
from types import GenericAlias
|
||||
from typing import Final, Generic, Self, overload
|
||||
from typing_extensions import TypeVar
|
||||
|
||||
import numpy as np
|
||||
from numpy._typing import (
|
||||
_CLongDoubleCodes,
|
||||
_Complex64Codes,
|
||||
_Complex128Codes,
|
||||
_DTypeLike,
|
||||
_Float16Codes,
|
||||
_Float32Codes,
|
||||
_Float64Codes,
|
||||
_Int8Codes,
|
||||
_Int16Codes,
|
||||
_Int32Codes,
|
||||
_Int64Codes,
|
||||
_IntPCodes,
|
||||
_LongDoubleCodes,
|
||||
_UInt8Codes,
|
||||
_UInt16Codes,
|
||||
_UInt32Codes,
|
||||
_UInt64Codes,
|
||||
)
|
||||
|
||||
__all__ = ["finfo", "iinfo"]
|
||||
|
||||
###
|
||||
|
||||
_IntegerT_co = TypeVar("_IntegerT_co", bound=np.integer, default=np.integer, covariant=True)
|
||||
_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=np.floating, covariant=True)
|
||||
|
||||
###
|
||||
|
||||
class iinfo(Generic[_IntegerT_co]):
|
||||
dtype: np.dtype[_IntegerT_co]
|
||||
bits: Final[int]
|
||||
kind: Final[str]
|
||||
key: Final[str]
|
||||
|
||||
@property
|
||||
def min(self, /) -> int: ...
|
||||
@property
|
||||
def max(self, /) -> int: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def __init__(self, /, int_type: _IntegerT_co | _DTypeLike[_IntegerT_co]) -> None: ...
|
||||
@overload
|
||||
def __init__(self: iinfo[np.int_], /, int_type: _IntPCodes | type[int] | int) -> None: ...
|
||||
@overload
|
||||
def __init__(self: iinfo[np.int8], /, int_type: _Int8Codes) -> None: ...
|
||||
@overload
|
||||
def __init__(self: iinfo[np.uint8], /, int_type: _UInt8Codes) -> None: ...
|
||||
@overload
|
||||
def __init__(self: iinfo[np.int16], /, int_type: _Int16Codes) -> None: ...
|
||||
@overload
|
||||
def __init__(self: iinfo[np.uint16], /, int_type: _UInt16Codes) -> None: ...
|
||||
@overload
|
||||
def __init__(self: iinfo[np.int32], /, int_type: _Int32Codes) -> None: ...
|
||||
@overload
|
||||
def __init__(self: iinfo[np.uint32], /, int_type: _UInt32Codes) -> None: ...
|
||||
@overload
|
||||
def __init__(self: iinfo[np.int64], /, int_type: _Int64Codes) -> None: ...
|
||||
@overload
|
||||
def __init__(self: iinfo[np.uint64], /, int_type: _UInt64Codes) -> None: ...
|
||||
@overload
|
||||
def __init__(self, /, int_type: str) -> None: ...
|
||||
|
||||
#
|
||||
@classmethod
|
||||
def __class_getitem__(cls, item: object, /) -> GenericAlias: ...
|
||||
|
||||
class finfo(Generic[_FloatingT_co]):
|
||||
dtype: np.dtype[_FloatingT_co] # readonly
|
||||
eps: _FloatingT_co # readonly
|
||||
_radix: _FloatingT_co # readonly
|
||||
smallest_normal: _FloatingT_co # readonly
|
||||
smallest_subnormal: _FloatingT_co # readonly
|
||||
max: _FloatingT_co # readonly
|
||||
min: _FloatingT_co # readonly
|
||||
|
||||
_fmt: str | None # `__str__` cache
|
||||
_repr: str | None # `__repr__` cache
|
||||
|
||||
bits: Final[int]
|
||||
maxexp: Final[int]
|
||||
minexp: Final[int]
|
||||
nmant: Final[int]
|
||||
precision: Final[int]
|
||||
|
||||
@classmethod
|
||||
def __class_getitem__(cls, item: object, /) -> GenericAlias: ...
|
||||
|
||||
#
|
||||
@overload
|
||||
def __new__(cls, dtype: _FloatingT_co | _DTypeLike[_FloatingT_co]) -> Self: ...
|
||||
@overload
|
||||
def __new__(cls, dtype: _Float16Codes) -> finfo[np.float16]: ...
|
||||
@overload
|
||||
def __new__(cls, dtype: _Float32Codes | _Complex64Codes | _DTypeLike[np.complex64]) -> finfo[np.float32]: ...
|
||||
@overload
|
||||
def __new__(cls, dtype: _Float64Codes | _Complex128Codes | type[complex] | complex) -> finfo[np.float64]: ...
|
||||
@overload
|
||||
def __new__(cls, dtype: _LongDoubleCodes | _CLongDoubleCodes | _DTypeLike[np.clongdouble]) -> finfo[np.longdouble]: ...
|
||||
@overload
|
||||
def __new__(cls, dtype: str) -> finfo: ...
|
||||
|
||||
#
|
||||
@cached_property
|
||||
def epsneg(self, /) -> _FloatingT_co: ...
|
||||
@cached_property
|
||||
def resolution(self, /) -> _FloatingT_co: ...
|
||||
@cached_property
|
||||
def machep(self, /) -> int: ...
|
||||
@cached_property
|
||||
def negep(self, /) -> int: ...
|
||||
@cached_property
|
||||
def nexp(self, /) -> int: ...
|
||||
@cached_property
|
||||
def iexp(self, /) -> int: ...
|
||||
@cached_property
|
||||
def tiny(self, /) -> _FloatingT_co: ...
|
||||
|
||||
@@ -1587,6 +1587,12 @@ _import_array(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if (SWIG_VERSION < 0x040400)
|
||||
#define _RETURN_VALUE NULL
|
||||
#else
|
||||
#define _RETURN_VALUE 0
|
||||
#endif
|
||||
|
||||
#define import_array() { \
|
||||
if (_import_array() < 0) { \
|
||||
PyErr_Print(); \
|
||||
@@ -1594,7 +1600,7 @@ _import_array(void)
|
||||
PyExc_ImportError, \
|
||||
"numpy._core.multiarray failed to import" \
|
||||
); \
|
||||
return NULL; \
|
||||
return _RETURN_VALUE; \
|
||||
} \
|
||||
}
|
||||
|
||||
|
||||
@@ -50,5 +50,6 @@ void *PyUFunc_API[] = {
|
||||
(void *) PyUFunc_AddLoopFromSpec,
|
||||
(void *) PyUFunc_AddPromoter,
|
||||
(void *) PyUFunc_AddWrappingLoop,
|
||||
(void *) PyUFunc_GiveFloatingpointErrors
|
||||
(void *) PyUFunc_GiveFloatingpointErrors,
|
||||
(void *) PyUFunc_AddLoopsFromSpecs
|
||||
};
|
||||
|
||||
@@ -87,6 +87,8 @@ NPY_NO_EXPORT int PyUFunc_AddWrappingLoop \
|
||||
(PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *);
|
||||
NPY_NO_EXPORT int PyUFunc_GiveFloatingpointErrors \
|
||||
(const char *, int);
|
||||
NPY_NO_EXPORT int PyUFunc_AddLoopsFromSpecs \
|
||||
(PyUFunc_LoopSlot *);
|
||||
|
||||
#else
|
||||
|
||||
@@ -249,6 +251,12 @@ static void **PyUFunc_API=NULL;
|
||||
PyUFunc_API[46])
|
||||
#endif
|
||||
|
||||
#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION
|
||||
#define PyUFunc_AddLoopsFromSpecs \
|
||||
(*(int (*)(PyUFunc_LoopSlot *)) \
|
||||
PyUFunc_API[47])
|
||||
#endif
|
||||
|
||||
static inline int
|
||||
_import_umath(void)
|
||||
{
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
|
||||
#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
|
||||
#define NPY_ABI_VERSION 0x02000000
|
||||
#define NPY_API_VERSION 0x00000014
|
||||
#define NPY_API_VERSION 0x00000015
|
||||
|
||||
#ifndef __STDC_FORMAT_MACROS
|
||||
#define __STDC_FORMAT_MACROS 1
|
||||
|
||||
@@ -173,9 +173,11 @@ typedef struct {
|
||||
#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1])))
|
||||
#define PyArrayScalar_FromLong(i) \
|
||||
((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)])))
|
||||
#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \
|
||||
return Py_INCREF(PyArrayScalar_FromLong(i)), \
|
||||
PyArrayScalar_FromLong(i)
|
||||
#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) do { \
|
||||
PyObject *obj = PyArrayScalar_FromLong(i); \
|
||||
Py_INCREF(obj); \
|
||||
return obj; \
|
||||
} while (0)
|
||||
#define PyArrayScalar_RETURN_FALSE \
|
||||
return Py_INCREF(PyArrayScalar_False), \
|
||||
PyArrayScalar_False
|
||||
|
||||
@@ -99,6 +99,11 @@ typedef enum {
|
||||
} NPY_ARRAYMETHOD_FLAGS;
|
||||
|
||||
|
||||
typedef enum {
|
||||
/* Casting via same_value logic */
|
||||
NPY_SAME_VALUE_CONTEXT_FLAG=1,
|
||||
} NPY_ARRAYMETHOD_CONTEXT_FLAGS;
|
||||
|
||||
typedef struct PyArrayMethod_Context_tag {
|
||||
/* The caller, which is typically the original ufunc. May be NULL */
|
||||
PyObject *caller;
|
||||
@@ -107,7 +112,22 @@ typedef struct PyArrayMethod_Context_tag {
|
||||
|
||||
/* Operand descriptors, filled in by resolve_descriptors */
|
||||
PyArray_Descr *const *descriptors;
|
||||
#if NPY_FEATURE_VERSION > NPY_2_3_API_VERSION
|
||||
void * _reserved;
|
||||
/*
|
||||
* Optional flag to pass information into the inner loop
|
||||
* NPY_ARRAYMETHOD_CONTEXT_FLAGS
|
||||
*/
|
||||
uint64_t flags;
|
||||
|
||||
/*
|
||||
* Optional run-time parameters to pass to the loop (currently used in sorting).
|
||||
* Fixed parameters are expected to be passed via auxdata.
|
||||
*/
|
||||
void *parameters;
|
||||
|
||||
/* Structure may grow (this is harmless for DType authors) */
|
||||
#endif
|
||||
} PyArrayMethod_Context;
|
||||
|
||||
|
||||
@@ -125,6 +145,13 @@ typedef struct {
|
||||
} PyArrayMethod_Spec;
|
||||
|
||||
|
||||
// This is used for the convenience function `PyUFunc_AddLoopsFromSpecs`
|
||||
typedef struct {
|
||||
const char *name;
|
||||
PyArrayMethod_Spec *spec;
|
||||
} PyUFunc_LoopSlot;
|
||||
|
||||
|
||||
/*
|
||||
* ArrayMethod slots
|
||||
* -----------------
|
||||
@@ -144,7 +171,6 @@ typedef struct {
|
||||
#define NPY_METH_contiguous_indexed_loop 9
|
||||
#define _NPY_METH_static_data 10
|
||||
|
||||
|
||||
/*
|
||||
* The resolve descriptors function, must be able to handle NULL values for
|
||||
* all output (but not input) `given_descrs` and fill `loop_descrs`.
|
||||
@@ -367,6 +393,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc,
|
||||
#define NPY_DT_get_clear_loop 9
|
||||
#define NPY_DT_get_fill_zero_loop 10
|
||||
#define NPY_DT_finalize_descr 11
|
||||
#define NPY_DT_get_constant 12
|
||||
|
||||
// These PyArray_ArrFunc slots will be deprecated and replaced eventually
|
||||
// getitem and setitem can be defined as a performance optimization;
|
||||
@@ -377,7 +404,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc,
|
||||
|
||||
// used to separate dtype slots from arrfuncs slots
|
||||
// intended only for internal use but defined here for clarity
|
||||
#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10)
|
||||
#define _NPY_DT_ARRFUNCS_OFFSET (1 << 11)
|
||||
|
||||
// Cast is disabled
|
||||
// #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET
|
||||
@@ -467,6 +494,42 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype);
|
||||
*/
|
||||
typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype);
|
||||
|
||||
/*
|
||||
* Constants that can be queried and used e.g. by reduce identies defaults.
|
||||
* These are also used to expose .finfo and .iinfo for example.
|
||||
*/
|
||||
/* Numerical constants */
|
||||
#define NPY_CONSTANT_zero 1
|
||||
#define NPY_CONSTANT_one 2
|
||||
#define NPY_CONSTANT_all_bits_set 3
|
||||
#define NPY_CONSTANT_maximum_finite 4
|
||||
#define NPY_CONSTANT_minimum_finite 5
|
||||
#define NPY_CONSTANT_inf 6
|
||||
#define NPY_CONSTANT_ninf 7
|
||||
#define NPY_CONSTANT_nan 8
|
||||
#define NPY_CONSTANT_finfo_radix 9
|
||||
#define NPY_CONSTANT_finfo_eps 10
|
||||
#define NPY_CONSTANT_finfo_smallest_normal 11
|
||||
#define NPY_CONSTANT_finfo_smallest_subnormal 12
|
||||
/* Constants that are always of integer type, value is `npy_intp/Py_ssize_t` */
|
||||
#define NPY_CONSTANT_finfo_nmant (1 << 16) + 0
|
||||
#define NPY_CONSTANT_finfo_min_exp (1 << 16) + 1
|
||||
#define NPY_CONSTANT_finfo_max_exp (1 << 16) + 2
|
||||
#define NPY_CONSTANT_finfo_decimal_digits (1 << 16) + 3
|
||||
|
||||
/* It may make sense to continue with other constants here, e.g. pi, etc? */
|
||||
|
||||
/*
|
||||
* Function to get a constant value for the dtype. Data may be unaligned, the
|
||||
* function is always called with the GIL held.
|
||||
*
|
||||
* @param descr The dtype instance (i.e. self)
|
||||
* @param ID The ID of the constant to get.
|
||||
* @param data Pointer to the data to be written too, may be unaligned.
|
||||
* @returns 1 on success, 0 if the constant is not available, or -1 with an error set.
|
||||
*/
|
||||
typedef int (PyArrayDTypeMeta_GetConstant)(PyArray_Descr *descr, int ID, void *data);
|
||||
|
||||
/*
|
||||
* TODO: These two functions are currently only used for experimental DType
|
||||
* API support. Their relation should be "reversed": NumPy should
|
||||
@@ -477,4 +540,8 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtyp
|
||||
typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *);
|
||||
typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *);
|
||||
|
||||
typedef struct {
|
||||
NPY_SORTKIND flags;
|
||||
} PyArrayMethod_SortParameters;
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */
|
||||
|
||||
@@ -162,18 +162,37 @@ enum NPY_TYPECHAR {
|
||||
};
|
||||
|
||||
/*
|
||||
* Changing this may break Numpy API compatibility
|
||||
* due to changing offsets in PyArray_ArrFuncs, so be
|
||||
* careful. Here we have reused the mergesort slot for
|
||||
* any kind of stable sort, the actual implementation will
|
||||
* depend on the data type.
|
||||
* Changing this may break Numpy API compatibility due to changing offsets in
|
||||
* PyArray_ArrFuncs, so be careful. Here we have reused the mergesort slot for
|
||||
* any kind of stable sort, the actual implementation will depend on the data
|
||||
* type.
|
||||
*
|
||||
* Updated in NumPy 2.4
|
||||
*
|
||||
* Updated with new names denoting requirements rather than specifying a
|
||||
* particular algorithm. All the previous values are reused in a way that
|
||||
* should be downstream compatible, but the actual algorithms used may be
|
||||
* different than before. The new approach should be more flexible and easier
|
||||
* to update.
|
||||
*
|
||||
* Names with a leading underscore are private, and should only be used
|
||||
* internally by NumPy.
|
||||
*
|
||||
* NPY_NSORTS remains the same for backwards compatibility, it should not be
|
||||
* changed.
|
||||
*/
|
||||
|
||||
typedef enum {
|
||||
_NPY_SORT_UNDEFINED=-1,
|
||||
NPY_QUICKSORT=0,
|
||||
NPY_HEAPSORT=1,
|
||||
NPY_MERGESORT=2,
|
||||
NPY_STABLESORT=2,
|
||||
_NPY_SORT_UNDEFINED = -1,
|
||||
NPY_QUICKSORT = 0,
|
||||
NPY_HEAPSORT = 1,
|
||||
NPY_MERGESORT = 2,
|
||||
NPY_STABLESORT = 2,
|
||||
// new style names
|
||||
_NPY_SORT_HEAPSORT = 1,
|
||||
NPY_SORT_DEFAULT = 0,
|
||||
NPY_SORT_STABLE = 2,
|
||||
NPY_SORT_DESCENDING = 4,
|
||||
} NPY_SORTKIND;
|
||||
#define NPY_NSORTS (NPY_STABLESORT + 1)
|
||||
|
||||
@@ -214,6 +233,16 @@ typedef enum {
|
||||
NPY_KEEPORDER=2
|
||||
} NPY_ORDER;
|
||||
|
||||
#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION
|
||||
/*
|
||||
* check that no values overflow/change during casting
|
||||
* Used explicitly only in the ArrayMethod creation or resolve_dtypes functions to
|
||||
* indicate that a same-value cast is supported. In external APIs, use only
|
||||
* NPY_SAME_VALUE_CASTING
|
||||
*/
|
||||
#define NPY_SAME_VALUE_CASTING_FLAG 64
|
||||
#endif
|
||||
|
||||
/* For specifying allowed casting in operations which support it */
|
||||
typedef enum {
|
||||
_NPY_ERROR_OCCURRED_IN_CAST = -1,
|
||||
@@ -227,6 +256,9 @@ typedef enum {
|
||||
NPY_SAME_KIND_CASTING=3,
|
||||
/* Allow any casts */
|
||||
NPY_UNSAFE_CASTING=4,
|
||||
#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION
|
||||
NPY_SAME_VALUE_CASTING=NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG,
|
||||
#endif
|
||||
} NPY_CASTING;
|
||||
|
||||
typedef enum {
|
||||
|
||||
@@ -242,7 +242,7 @@ static inline PyObject*
|
||||
npy_PyFile_OpenFile(PyObject *filename, const char *mode)
|
||||
{
|
||||
PyObject *open;
|
||||
open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");
|
||||
open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); // noqa: borrowed-ref OK
|
||||
if (open == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -98,11 +98,23 @@
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#define NPY_FINLINE static __forceinline
|
||||
#ifdef __cplusplus
|
||||
#define NPY_FINLINE __forceinline
|
||||
#else
|
||||
#define NPY_FINLINE static __forceinline
|
||||
#endif
|
||||
#elif defined(__GNUC__)
|
||||
#define NPY_FINLINE static inline __attribute__((always_inline))
|
||||
#ifdef __cplusplus
|
||||
#define NPY_FINLINE inline __attribute__((always_inline))
|
||||
#else
|
||||
#define NPY_FINLINE static inline __attribute__((always_inline))
|
||||
#endif
|
||||
#else
|
||||
#define NPY_FINLINE static
|
||||
#ifdef __cplusplus
|
||||
#define NPY_FINLINE inline
|
||||
#else
|
||||
#define NPY_FINLINE static NPY_INLINE
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
* NPY_CPU_RISCV64
|
||||
* NPY_CPU_RISCV32
|
||||
* NPY_CPU_LOONGARCH
|
||||
* NPY_CPU_SW_64
|
||||
* NPY_CPU_WASM
|
||||
*/
|
||||
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
|
||||
@@ -111,14 +112,15 @@
|
||||
#endif
|
||||
#elif defined(__loongarch_lp64)
|
||||
#define NPY_CPU_LOONGARCH64
|
||||
#elif defined(__EMSCRIPTEN__)
|
||||
#elif defined(__sw_64__)
|
||||
#define NPY_CPU_SW_64
|
||||
#elif defined(__EMSCRIPTEN__) || defined(__wasm__)
|
||||
/* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
|
||||
/* __wasm__ is defined by clang when targeting wasm */
|
||||
#define NPY_CPU_WASM
|
||||
#else
|
||||
#error Unknown CPU, please report this to numpy maintainers with \
|
||||
information about your platform (OS, CPU and compiler)
|
||||
#endif
|
||||
|
||||
#define NPY_ALIGNMENT_REQUIRED 1
|
||||
|
||||
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */
|
||||
|
||||
@@ -51,6 +51,7 @@
|
||||
|| defined(NPY_CPU_RISCV64) \
|
||||
|| defined(NPY_CPU_RISCV32) \
|
||||
|| defined(NPY_CPU_LOONGARCH) \
|
||||
|| defined(NPY_CPU_SW_64) \
|
||||
|| defined(NPY_CPU_WASM)
|
||||
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
|
||||
|
||||
|
||||
@@ -84,6 +84,7 @@
|
||||
#define NPY_2_1_API_VERSION 0x00000013
|
||||
#define NPY_2_2_API_VERSION 0x00000013
|
||||
#define NPY_2_3_API_VERSION 0x00000014
|
||||
#define NPY_2_4_API_VERSION 0x00000015
|
||||
|
||||
|
||||
/*
|
||||
@@ -174,6 +175,8 @@
|
||||
#define NPY_FEATURE_VERSION_STRING "2.1"
|
||||
#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "2.3"
|
||||
#elif NPY_FEATURE_VERSION == NPY_2_4_API_VERSION
|
||||
#define NPY_FEATURE_VERSION_STRING "2.4"
|
||||
#else
|
||||
#error "Missing version string define for new NumPy version."
|
||||
#endif
|
||||
|
||||
Binary file not shown.
@@ -3,5 +3,5 @@ includedir=${prefix}/include
|
||||
|
||||
Name: numpy
|
||||
Description: NumPy is the fundamental package for scientific computing with Python.
|
||||
Version: 2.3.4
|
||||
Version: 2.4.0
|
||||
Cflags: -I${includedir}
|
||||
|
||||
@@ -114,11 +114,20 @@ array_function_from_c_func_and_dispatcher = functools.partial(
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
|
||||
def empty_like(
|
||||
prototype, dtype=None, order=None, subok=None, shape=None, *, device=None
|
||||
prototype, dtype=None, order="K", subok=True, shape=None, *, device=None
|
||||
):
|
||||
"""
|
||||
empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *,
|
||||
device=None)
|
||||
empty_like(
|
||||
prototype,
|
||||
/,
|
||||
dtype=None,
|
||||
order='K',
|
||||
subok=True,
|
||||
shape=None,
|
||||
*,
|
||||
device=None,
|
||||
)
|
||||
--
|
||||
|
||||
Return a new array with the same shape and type as a given array.
|
||||
|
||||
@@ -186,15 +195,18 @@ def empty_like(
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
|
||||
def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
|
||||
def concatenate(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):
|
||||
"""
|
||||
concatenate(
|
||||
(a1, a2, ...),
|
||||
arrays,
|
||||
/,
|
||||
axis=0,
|
||||
out=None,
|
||||
*,
|
||||
dtype=None,
|
||||
casting="same_kind"
|
||||
casting="same_kind",
|
||||
)
|
||||
--
|
||||
|
||||
Join a sequence of arrays along an existing axis.
|
||||
|
||||
@@ -295,7 +307,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
|
||||
def inner(a, b):
|
||||
def inner(a, b, /):
|
||||
"""
|
||||
inner(a, b, /)
|
||||
|
||||
@@ -389,7 +401,7 @@ def inner(a, b):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
|
||||
def where(condition, x=None, y=None):
|
||||
def where(condition, x=None, y=None, /):
|
||||
"""
|
||||
where(condition, [x, y], /)
|
||||
|
||||
@@ -465,7 +477,7 @@ def where(condition, x=None, y=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
|
||||
def lexsort(keys, axis=None):
|
||||
def lexsort(keys, axis=-1):
|
||||
"""
|
||||
lexsort(keys, axis=-1)
|
||||
|
||||
@@ -586,7 +598,7 @@ def lexsort(keys, axis=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
|
||||
def can_cast(from_, to, casting=None):
|
||||
def can_cast(from_, to, casting="safe"):
|
||||
"""
|
||||
can_cast(from_, to, casting='safe')
|
||||
|
||||
@@ -648,7 +660,7 @@ def can_cast(from_, to, casting=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
|
||||
def min_scalar_type(a):
|
||||
def min_scalar_type(a, /):
|
||||
"""
|
||||
min_scalar_type(a, /)
|
||||
|
||||
@@ -701,19 +713,7 @@ def result_type(*arrays_and_dtypes):
|
||||
result_type(*arrays_and_dtypes)
|
||||
|
||||
Returns the type that results from applying the NumPy
|
||||
type promotion rules to the arguments.
|
||||
|
||||
Type promotion in NumPy works similarly to the rules in languages
|
||||
like C++, with some slight differences. When both scalars and
|
||||
arrays are used, the array's type takes precedence and the actual value
|
||||
of the scalar is taken into account.
|
||||
|
||||
For example, calculating 3*a, where a is an array of 32-bit floats,
|
||||
intuitively should result in a 32-bit float output. If the 3 is a
|
||||
32-bit integer, the NumPy rules indicate it can't convert losslessly
|
||||
into a 32-bit float, so a 64-bit float should be the result type.
|
||||
By examining the value of the constant, '3', we see that it fits in
|
||||
an 8-bit integer, which can be cast losslessly into the 32-bit float.
|
||||
:ref:`type promotion <arrays.promotion>` rules to the arguments.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
@@ -729,27 +729,6 @@ def result_type(*arrays_and_dtypes):
|
||||
--------
|
||||
dtype, promote_types, min_scalar_type, can_cast
|
||||
|
||||
Notes
|
||||
-----
|
||||
The specific algorithm used is as follows.
|
||||
|
||||
Categories are determined by first checking which of boolean,
|
||||
integer (int/uint), or floating point (float/complex) the maximum
|
||||
kind of all the arrays and the scalars are.
|
||||
|
||||
If there are only scalars or the maximum category of the scalars
|
||||
is higher than the maximum category of the arrays,
|
||||
the data types are combined with :func:`promote_types`
|
||||
to produce the return value.
|
||||
|
||||
Otherwise, `min_scalar_type` is called on each scalar, and
|
||||
the resulting data types are all combined with :func:`promote_types`
|
||||
to produce the return value.
|
||||
|
||||
The set of int values is not a subset of the uint values for types
|
||||
with the same number of bits, something not reflected in
|
||||
:func:`min_scalar_type`, but handled as a special case in `result_type`.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
@@ -862,7 +841,7 @@ def dot(a, b, out=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
|
||||
def vdot(a, b):
|
||||
def vdot(a, b, /):
|
||||
r"""
|
||||
vdot(a, b, /)
|
||||
|
||||
@@ -925,7 +904,7 @@ def vdot(a, b):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
|
||||
def bincount(x, weights=None, minlength=None):
|
||||
def bincount(x, /, weights=None, minlength=0):
|
||||
"""
|
||||
bincount(x, /, weights=None, minlength=0)
|
||||
|
||||
@@ -1001,7 +980,7 @@ def bincount(x, weights=None, minlength=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
|
||||
def ravel_multi_index(multi_index, dims, mode=None, order=None):
|
||||
def ravel_multi_index(multi_index, dims, mode="raise", order="C"):
|
||||
"""
|
||||
ravel_multi_index(multi_index, dims, mode='raise', order='C')
|
||||
|
||||
@@ -1059,7 +1038,7 @@ def ravel_multi_index(multi_index, dims, mode=None, order=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
|
||||
def unravel_index(indices, shape=None, order=None):
|
||||
def unravel_index(indices, shape, order="C"):
|
||||
"""
|
||||
unravel_index(indices, shape, order='C')
|
||||
|
||||
@@ -1104,7 +1083,7 @@ def unravel_index(indices, shape=None, order=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
|
||||
def copyto(dst, src, casting=None, where=None):
|
||||
def copyto(dst, src, casting="same_kind", where=True):
|
||||
"""
|
||||
copyto(dst, src, casting='same_kind', where=True)
|
||||
|
||||
@@ -1156,7 +1135,7 @@ def copyto(dst, src, casting=None, where=None):
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
|
||||
def putmask(a, /, mask, values):
|
||||
"""
|
||||
putmask(a, mask, values)
|
||||
putmask(a, /, mask, values)
|
||||
|
||||
Changes elements of an array based on conditional and input values.
|
||||
|
||||
@@ -1200,7 +1179,7 @@ def putmask(a, /, mask, values):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
|
||||
def packbits(a, axis=None, bitorder='big'):
|
||||
def packbits(a, /, axis=None, bitorder="big"):
|
||||
"""
|
||||
packbits(a, /, axis=None, bitorder='big')
|
||||
|
||||
@@ -1257,7 +1236,7 @@ def packbits(a, axis=None, bitorder='big'):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
|
||||
def unpackbits(a, axis=None, count=None, bitorder='big'):
|
||||
def unpackbits(a, /, axis=None, count=None, bitorder="big"):
|
||||
"""
|
||||
unpackbits(a, /, axis=None, count=None, bitorder='big')
|
||||
|
||||
@@ -1337,9 +1316,9 @@ def unpackbits(a, axis=None, count=None, bitorder='big'):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
|
||||
def shares_memory(a, b, max_work=None):
|
||||
def shares_memory(a, b, /, max_work=-1):
|
||||
"""
|
||||
shares_memory(a, b, /, max_work=None)
|
||||
shares_memory(a, b, /, max_work=-1)
|
||||
|
||||
Determine if two arrays share memory.
|
||||
|
||||
@@ -1416,9 +1395,9 @@ def shares_memory(a, b, max_work=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
|
||||
def may_share_memory(a, b, max_work=None):
|
||||
def may_share_memory(a, b, /, max_work=0):
|
||||
"""
|
||||
may_share_memory(a, b, /, max_work=None)
|
||||
may_share_memory(a, b, /, max_work=0)
|
||||
|
||||
Determine if two arrays might share memory
|
||||
|
||||
@@ -1458,14 +1437,14 @@ def may_share_memory(a, b, max_work=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
|
||||
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
|
||||
def is_busday(dates, weekmask="1111100", holidays=None, busdaycal=None, out=None):
|
||||
"""
|
||||
is_busday(
|
||||
dates,
|
||||
weekmask='1111100',
|
||||
holidays=None,
|
||||
busdaycal=None,
|
||||
out=None
|
||||
out=None,
|
||||
)
|
||||
|
||||
Calculates which of the given dates are valid days, and which are not.
|
||||
@@ -1517,7 +1496,7 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
|
||||
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
|
||||
def busday_offset(dates, offsets, roll="raise", weekmask="1111100", holidays=None,
|
||||
busdaycal=None, out=None):
|
||||
"""
|
||||
busday_offset(
|
||||
@@ -1527,7 +1506,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
|
||||
weekmask='1111100',
|
||||
holidays=None,
|
||||
busdaycal=None,
|
||||
out=None
|
||||
out=None,
|
||||
)
|
||||
|
||||
First adjusts the date to fall on a valid day according to
|
||||
@@ -1619,7 +1598,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
|
||||
def busday_count(begindates, enddates, weekmask=None, holidays=None,
|
||||
def busday_count(begindates, enddates, weekmask="1111100", holidays=(),
|
||||
busdaycal=None, out=None):
|
||||
"""
|
||||
busday_count(
|
||||
@@ -1692,9 +1671,8 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None,
|
||||
return (begindates, enddates, weekmask, holidays, out)
|
||||
|
||||
|
||||
@array_function_from_c_func_and_dispatcher(
|
||||
_multiarray_umath.datetime_as_string)
|
||||
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
|
||||
@array_function_from_c_func_and_dispatcher(_multiarray_umath.datetime_as_string)
|
||||
def datetime_as_string(arr, unit=None, timezone="naive", casting="same_kind"):
|
||||
"""
|
||||
datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
|
||||
|
||||
@@ -1723,7 +1701,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None):
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> import pytz
|
||||
>>> from zoneinfo import ZoneInfo
|
||||
>>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
|
||||
>>> d
|
||||
array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
|
||||
@@ -1736,9 +1714,9 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None):
|
||||
'2002-10-27T07:30Z'], dtype='<U35')
|
||||
|
||||
Note that we picked datetimes that cross a DST boundary. Passing in a
|
||||
``pytz`` timezone object will print the appropriate offset
|
||||
``ZoneInfo`` object will print the appropriate offset
|
||||
|
||||
>>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
|
||||
>>> np.datetime_as_string(d, timezone=ZoneInfo('US/Eastern'))
|
||||
array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
|
||||
'2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -10,8 +10,7 @@ import warnings
|
||||
import numpy as np
|
||||
from numpy.exceptions import AxisError
|
||||
|
||||
from . import multiarray, numerictypes, overrides, shape_base, umath
|
||||
from . import numerictypes as nt
|
||||
from . import multiarray, numerictypes, numerictypes as nt, overrides, shape_base, umath
|
||||
from ._ufunc_config import errstate
|
||||
from .multiarray import ( # noqa: F401
|
||||
ALLOW_THREADS,
|
||||
@@ -524,11 +523,11 @@ def count_nonzero(a, axis=None, *, keepdims=False):
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> np.count_nonzero(np.eye(4))
|
||||
4
|
||||
np.int64(4)
|
||||
>>> a = np.array([[0, 1, 7, 0],
|
||||
... [3, 0, 2, 19]])
|
||||
>>> np.count_nonzero(a)
|
||||
5
|
||||
np.int64(5)
|
||||
>>> np.count_nonzero(a, axis=0)
|
||||
array([1, 1, 2, 1])
|
||||
>>> np.count_nonzero(a, axis=1)
|
||||
@@ -894,12 +893,12 @@ def convolve(a, v, mode='full'):
|
||||
|
||||
"""
|
||||
a, v = array(a, copy=None, ndmin=1), array(v, copy=None, ndmin=1)
|
||||
if (len(v) > len(a)):
|
||||
a, v = v, a
|
||||
if len(a) == 0:
|
||||
raise ValueError('a cannot be empty')
|
||||
if len(v) == 0:
|
||||
raise ValueError('v cannot be empty')
|
||||
if len(v) > len(a):
|
||||
a, v = v, a
|
||||
return multiarray.correlate(a, v[::-1], mode)
|
||||
|
||||
|
||||
@@ -1107,10 +1106,9 @@ def tensordot(a, b, axes=2):
|
||||
|
||||
An extended example taking advantage of the overloading of + and \\*:
|
||||
|
||||
>>> a = np.array(range(1, 9))
|
||||
>>> a.shape = (2, 2, 2)
|
||||
>>> a = np.array(range(1, 9)).reshape((2, 2, 2))
|
||||
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
|
||||
>>> A.shape = (2, 2)
|
||||
>>> A = A.reshape((2, 2))
|
||||
>>> a; A
|
||||
array([[[1, 2],
|
||||
[3, 4]],
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user