diff --git a/lib/python3.10/site-packages/av/__init__.py b/lib/python3.10/site-packages/av/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e2f9e5a6de13c9b92276b5edaa940d6f08694dfb --- /dev/null +++ b/lib/python3.10/site-packages/av/__init__.py @@ -0,0 +1,69 @@ +# MUST import the core before anything else in order to initialize the underlying +# library that is being wrapped. +from av._core import time_base, library_versions, ffmpeg_version_info + +# Capture logging (by importing it). +from av import logging + +# For convenience, import all common attributes. +from av.about import __version__ +from av.audio.codeccontext import AudioCodecContext +from av.audio.fifo import AudioFifo +from av.audio.format import AudioFormat +from av.audio.frame import AudioFrame +from av.audio.layout import AudioLayout +from av.audio.resampler import AudioResampler +from av.audio.stream import AudioStream +from av.bitstream import BitStreamFilterContext, bitstream_filters_available +from av.codec.codec import Codec, codecs_available +from av.codec.context import CodecContext +from av.codec.hwaccel import HWConfig +from av.container import open +from av.format import ContainerFormat, formats_available +from av.packet import Packet +from av.error import * # noqa: F403; This is limited to exception types. +from av.video.codeccontext import VideoCodecContext +from av.video.format import VideoFormat +from av.video.frame import VideoFrame +from av.video.stream import VideoStream + +__all__ = ( + "__version__", + "time_base", + "ffmpeg_version_info", + "library_versions", + "AudioCodecContext", + "AudioFifo", + "AudioFormat", + "AudioFrame", + "AudioLayout", + "AudioResampler", + "AudioStream", + "BitStreamFilterContext", + "bitstream_filters_available", + "Codec", + "codecs_available", + "CodecContext", + "open", + "ContainerFormat", + "formats_available", + "Packet", + "VideoCodecContext", + "VideoFormat", + "VideoFrame", + "VideoStream", +) + + +def get_include() -> str: + """ + Returns the path to the `include` folder to be used when building extensions to av. + """ + import os + + # Installed package + include_path = os.path.join(os.path.dirname(__file__), "include") + if os.path.exists(include_path): + return include_path + # Running from source directory + return os.path.join(os.path.dirname(__file__), os.pardir, "include") diff --git a/lib/python3.10/site-packages/av/_core.pyx b/lib/python3.10/site-packages/av/_core.pyx new file mode 100644 index 0000000000000000000000000000000000000000..edca772f58e0d5ce3e9a8c9e76b69e1f0da2fdea --- /dev/null +++ b/lib/python3.10/site-packages/av/_core.pyx @@ -0,0 +1,65 @@ +cimport libav as lib + +# Initialise libraries. +lib.avformat_network_init() +lib.avdevice_register_all() + +# Exports. +time_base = lib.AV_TIME_BASE + + +cdef decode_version(v): + if v < 0: + return (-1, -1, -1) + + cdef int major = (v >> 16) & 0xff + cdef int minor = (v >> 8) & 0xff + cdef int micro = (v) & 0xff + + return (major, minor, micro) + +# Return an informative version string. +# This usually is the actual release version number or a git commit +# description. This string has no fixed format and can change any time. It +# should never be parsed by code. 
+ffmpeg_version_info = lib.av_version_info() + +library_meta = { + "libavutil": dict( + version=decode_version(lib.avutil_version()), + configuration=lib.avutil_configuration(), + license=lib.avutil_license() + ), + "libavcodec": dict( + version=decode_version(lib.avcodec_version()), + configuration=lib.avcodec_configuration(), + license=lib.avcodec_license() + ), + "libavformat": dict( + version=decode_version(lib.avformat_version()), + configuration=lib.avformat_configuration(), + license=lib.avformat_license() + ), + "libavdevice": dict( + version=decode_version(lib.avdevice_version()), + configuration=lib.avdevice_configuration(), + license=lib.avdevice_license() + ), + "libavfilter": dict( + version=decode_version(lib.avfilter_version()), + configuration=lib.avfilter_configuration(), + license=lib.avfilter_license() + ), + "libswscale": dict( + version=decode_version(lib.swscale_version()), + configuration=lib.swscale_configuration(), + license=lib.swscale_license() + ), + "libswresample": dict( + version=decode_version(lib.swresample_version()), + configuration=lib.swresample_configuration(), + license=lib.swresample_license() + ), +} + +library_versions = {name: meta["version"] for name, meta in library_meta.items()} diff --git a/lib/python3.10/site-packages/av/bitstream.pyx b/lib/python3.10/site-packages/av/bitstream.pyx new file mode 100644 index 0000000000000000000000000000000000000000..b5361f8c2fb323d68897fcc86a8fa7a67e78c57c --- /dev/null +++ b/lib/python3.10/site-packages/av/bitstream.pyx @@ -0,0 +1,95 @@ +cimport libav as lib +from libc.errno cimport EAGAIN + +from av.error cimport err_check +from av.packet cimport Packet +from av.stream cimport Stream + + +cdef class BitStreamFilterContext: + """ + Initializes a bitstream filter: a way to directly modify packet data. + + Wraps :ffmpeg:`AVBSFContext` + + :param Stream in_stream: A stream that defines the input codec for the bitfilter. + :param Stream out_stream: A stream whose codec is overwritten using the output parameters from the bitfilter. + """ + def __cinit__(self, filter_description, Stream in_stream=None, Stream out_stream=None): + cdef int res + cdef char *filter_str = filter_description + + with nogil: + res = lib.av_bsf_list_parse_str(filter_str, &self.ptr) + err_check(res) + + if in_stream is not None: + with nogil: + res = lib.avcodec_parameters_copy(self.ptr.par_in, in_stream.ptr.codecpar) + err_check(res) + + with nogil: + res = lib.av_bsf_init(self.ptr) + err_check(res) + + if out_stream is not None: + with nogil: + res = lib.avcodec_parameters_copy(out_stream.ptr.codecpar, self.ptr.par_out) + err_check(res) + lib.avcodec_parameters_to_context(out_stream.codec_context.ptr, out_stream.ptr.codecpar) + + def __dealloc__(self): + if self.ptr: + lib.av_bsf_free(&self.ptr) + + cpdef filter(self, Packet packet=None): + """ + Processes a packet based on the filter_description set during initialization. + Multiple packets may be created. + + :type: list[Packet] + """ + cdef int res + cdef Packet new_packet + + with nogil: + res = lib.av_bsf_send_packet(self.ptr, packet.ptr if packet is not None else NULL) + err_check(res) + + output = [] + while True: + new_packet = Packet() + with nogil: + res = lib.av_bsf_receive_packet(self.ptr, new_packet.ptr) + + if res == -EAGAIN or res == lib.AVERROR_EOF: + return output + + err_check(res) + if res: + return output + + output.append(new_packet) + + cpdef flush(self): + """ + Reset the internal state of the filter. + Should be called e.g. when seeking. 
+ Can be used to make the filter usable again after draining it with EOF marker packet. + """ + lib.av_bsf_flush(self.ptr) + +cdef get_filter_names(): + names = set() + cdef const lib.AVBitStreamFilter *ptr + cdef void *opaque = NULL + while True: + ptr = lib.av_bsf_iterate(&opaque) + if ptr: + names.add(ptr.name) + else: + break + + return names + +bitstream_filters_available = get_filter_names() diff --git a/lib/python3.10/site-packages/av/buffer.pxd b/lib/python3.10/site-packages/av/buffer.pxd new file mode 100644 index 0000000000000000000000000000000000000000..cfab07ca065b699eb1ce4f9f4b4b7a068f04d766 --- /dev/null +++ b/lib/python3.10/site-packages/av/buffer.pxd @@ -0,0 +1,6 @@ + +cdef class Buffer: + + cdef size_t _buffer_size(self) + cdef void* _buffer_ptr(self) + cdef bint _buffer_writable(self) diff --git a/lib/python3.10/site-packages/av/buffer.pyi b/lib/python3.10/site-packages/av/buffer.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bc1090d1deac54dcfc364fea49ee31bca526c6f8 --- /dev/null +++ b/lib/python3.10/site-packages/av/buffer.pyi @@ -0,0 +1,9 @@ +# When Python 3.12 becomes our lowest supported version, we could make this +# class inherit `collections.abc.Buffer`. + +class Buffer: + buffer_size: int + buffer_ptr: int + def update(self, input: bytes) -> None: ... + def __buffer__(self, flags: int) -> memoryview: ... + def __bytes__(self) -> bytes: ... diff --git a/lib/python3.10/site-packages/av/bytesource.pxd b/lib/python3.10/site-packages/av/bytesource.pxd new file mode 100644 index 0000000000000000000000000000000000000000..050baab352f74227f7cc253a80b0b125e45894ce --- /dev/null +++ b/lib/python3.10/site-packages/av/bytesource.pxd @@ -0,0 +1,14 @@ +from cpython.buffer cimport Py_buffer + + +cdef class ByteSource: + + cdef object owner + + cdef bint has_view + cdef Py_buffer view + + cdef unsigned char *ptr + cdef size_t length + +cdef ByteSource bytesource(object, bint allow_none=*) diff --git a/lib/python3.10/site-packages/av/bytesource.pyx b/lib/python3.10/site-packages/av/bytesource.pyx new file mode 100644 index 0000000000000000000000000000000000000000..9192c6d1a51b18b19cddf91ee556462e500af369 --- /dev/null +++ b/lib/python3.10/site-packages/av/bytesource.pyx @@ -0,0 +1,43 @@ +from cpython.buffer cimport ( + PyBUF_SIMPLE, + PyBuffer_Release, + PyObject_CheckBuffer, + PyObject_GetBuffer, +) + + +cdef class ByteSource: + def __cinit__(self, owner): + self.owner = owner + + try: + self.ptr = owner + except TypeError: + pass + else: + self.length = len(owner) + return + + if PyObject_CheckBuffer(owner): + # Can very likely use PyBUF_ND instead of PyBUF_SIMPLE + res = PyObject_GetBuffer(owner, &self.view, PyBUF_SIMPLE) + if not res: + self.has_view = True + self.ptr = self.view.buf + self.length = self.view.len + return + + raise TypeError("expected bytes, bytearray or memoryview") + + def __dealloc__(self): + if self.has_view: + PyBuffer_Release(&self.view) + + +cdef ByteSource bytesource(obj, bint allow_none=False): + if allow_none and obj is None: + return + elif isinstance(obj, ByteSource): + return obj + else: + return ByteSource(obj) diff --git a/lib/python3.10/site-packages/av/datasets.py b/lib/python3.10/site-packages/av/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..5954a9c98370d8424c9becce2cd6010b03700688 --- /dev/null +++ b/lib/python3.10/site-packages/av/datasets.py @@ -0,0 +1,123 @@ +import errno +import logging +import os +import sys +from typing import Iterator +from urllib.request import urlopen + 
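As an aside on the bitstream filter wrapper shown above (av/bitstream.pyx), a minimal usage sketch follows; the input file name and the choice of the h264_mp4toannexb filter are illustrative assumptions, not part of the diff.

import av

with av.open("input.mp4") as container:                      # hypothetical input file
    in_stream = container.streams.video[0]
    # The input stream supplies the codec parameters the filter needs (e.g. extradata).
    bsf = av.BitStreamFilterContext("h264_mp4toannexb", in_stream)
    for packet in container.demux(in_stream):
        # Each input packet may yield zero or more filtered packets.
        for out_packet in bsf.filter(packet):
            pass  # out_packet now carries Annex B style NAL framing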
+log = logging.getLogger(__name__) + + +def iter_data_dirs(check_writable: bool = False) -> Iterator[str]: + try: + yield os.environ["PYAV_TESTDATA_DIR"] + except KeyError: + pass + + if os.name == "nt": + yield os.path.join(sys.prefix, "pyav", "datasets") + return + + bases = [ + "/usr/local/share", + "/usr/local/lib", + "/usr/share", + "/usr/lib", + ] + + # Prefer the local virtualenv. + if hasattr(sys, "real_prefix"): + bases.insert(0, sys.prefix) + + for base in bases: + dir_ = os.path.join(base, "pyav", "datasets") + if check_writable: + if os.path.exists(dir_): + if not os.access(dir_, os.W_OK): + continue + else: + if not os.access(base, os.W_OK): + continue + yield dir_ + + yield os.path.join(os.path.expanduser("~"), ".pyav", "datasets") + + +def cached_download(url: str, name: str) -> str: + """Download the data at a URL, and cache it under the given name. + + The file is stored under `pyav/test` with the given name in the directory + :envvar:`PYAV_TESTDATA_DIR`, or the first that is writeable of: + + - the current virtualenv + - ``/usr/local/share`` + - ``/usr/local/lib`` + - ``/usr/share`` + - ``/usr/lib`` + - the user's home + + """ + + clean_name = os.path.normpath(name) + if clean_name != name: + raise ValueError(f"{name} is not normalized.") + + for dir_ in iter_data_dirs(): + path = os.path.join(dir_, name) + if os.path.exists(path): + return path + + dir_ = next(iter_data_dirs(True)) + path = os.path.join(dir_, name) + + log.info(f"Downloading {url} to {path}") + + response = urlopen(url) + if response.getcode() != 200: + raise ValueError(f"HTTP {response.getcode()}") + + dir_ = os.path.dirname(path) + try: + os.makedirs(dir_) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + tmp_path = path + ".tmp" + with open(tmp_path, "wb") as fh: + while True: + chunk = response.read(8196) + if chunk: + fh.write(chunk) + else: + break + + os.rename(tmp_path, path) + + return path + + +def fate(name: str) -> str: + """Download and return a path to a sample from the FFmpeg test suite. + + Data is handled by :func:`cached_download`. + + See the `FFmpeg Automated Test Environment `_ + + """ + return cached_download( + "http://fate.ffmpeg.org/fate-suite/" + name, + os.path.join("fate-suite", name.replace("/", os.path.sep)), + ) + + +def curated(name: str) -> str: + """Download and return a path to a sample that is curated by the PyAV developers. + + Data is handled by :func:`cached_download`. + + """ + return cached_download( + "https://pyav.org/datasets/" + name, + os.path.join("pyav-curated", name.replace("/", os.path.sep)), + ) diff --git a/lib/python3.10/site-packages/av/descriptor.pxd b/lib/python3.10/site-packages/av/descriptor.pxd new file mode 100644 index 0000000000000000000000000000000000000000..404f646afce60e3703814c4ae6c493dc8757b5c9 --- /dev/null +++ b/lib/python3.10/site-packages/av/descriptor.pxd @@ -0,0 +1,20 @@ +cimport libav as lib + + +cdef class Descriptor: + + # These are present as: + # - AVCodecContext.av_class (same as avcodec_get_class()) + # - AVFormatContext.av_class (same as avformat_get_class()) + # - AVFilterContext.av_class (same as avfilter_get_class()) + # - AVCodec.priv_class + # - AVOutputFormat.priv_class + # - AVInputFormat.priv_class + # - AVFilter.priv_class + + cdef const lib.AVClass *ptr + + cdef object _options # Option list cache. 
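To show how the Descriptor declared above is typically consumed, here is a minimal sketch; it assumes the mp4 muxer is present in the linked FFmpeg build and relies on the ContainerFormat and Option wrappers that appear later in this diff.

import av

fmt = av.ContainerFormat("mp4")
# ContainerFormat.descriptor wraps the format's AVClass; its options are Option objects.
for opt in fmt.descriptor.options:
    print(opt.name, opt.type, opt.help)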
+ + +cdef Descriptor wrap_avclass(const lib.AVClass*) diff --git a/lib/python3.10/site-packages/av/descriptor.pyi b/lib/python3.10/site-packages/av/descriptor.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ae1998391263a4d332a3f8c01c87495d4ef5c1cd --- /dev/null +++ b/lib/python3.10/site-packages/av/descriptor.pyi @@ -0,0 +1,7 @@ +from typing import NoReturn + +from .option import Option + +class Descriptor: + name: str + options: tuple[Option, ...] diff --git a/lib/python3.10/site-packages/av/dictionary.pyi b/lib/python3.10/site-packages/av/dictionary.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a6868bea2c9e122963a58410707f9cbd865b49fe --- /dev/null +++ b/lib/python3.10/site-packages/av/dictionary.pyi @@ -0,0 +1,10 @@ +from collections.abc import MutableMapping +from typing import Iterator + +class Dictionary(MutableMapping[str, str]): + def __getitem__(self, key: str) -> str: ... + def __setitem__(self, key: str, value: str) -> None: ... + def __delitem__(self, key: str) -> None: ... + def __len__(self) -> int: ... + def __iter__(self) -> Iterator[str]: ... + def __repr__(self) -> str: ... diff --git a/lib/python3.10/site-packages/av/error.pxd b/lib/python3.10/site-packages/av/error.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d9a542a36ab6db47d7226da67b07933f3b569ceb --- /dev/null +++ b/lib/python3.10/site-packages/av/error.pxd @@ -0,0 +1,3 @@ + +cdef int stash_exception(exc_info=*) +cpdef int err_check(int res, filename=*) except -1 diff --git a/lib/python3.10/site-packages/av/error.pyi b/lib/python3.10/site-packages/av/error.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e18f7334c820a497c476fdeaeb860ffe4fb02db4 --- /dev/null +++ b/lib/python3.10/site-packages/av/error.pyi @@ -0,0 +1,72 @@ +import builtins +from enum import Enum + +classes: dict[int, Exception] + +def code_to_tag(code: int) -> bytes: ... +def tag_to_code(tag: bytes) -> int: ... +def err_check(res: int, filename: str | None = None) -> int: ... + +class FFmpegError(Exception): + errno: int | None + strerror: str | None + filename: str + log: tuple[int, tuple[int, str, str] | None] + + def __init__( + self, + code: int, + message: str, + filename: str | None = None, + log: tuple[int, tuple[int, str, str] | None] | None = None, + ) -> None: ... + +class LookupError(FFmpegError): ... +class HTTPError(FFmpegError): ... +class HTTPClientError(FFmpegError): ... +class UndefinedError(FFmpegError): ... +class InvalidDataError(FFmpegError, builtins.ValueError): ... +class BugError(FFmpegError, builtins.RuntimeError): ... +class BufferTooSmallError(FFmpegError, builtins.ValueError): ... +class BSFNotFoundError(LookupError): ... +class DecoderNotFoundError(LookupError): ... +class DemuxerNotFoundError(LookupError): ... +class EncoderNotFoundError(LookupError): ... +class ExitError(FFmpegError): ... +class ExternalError(FFmpegError): ... +class FilterNotFoundError(LookupError): ... +class MuxerNotFoundError(LookupError): ... +class OptionNotFoundError(LookupError): ... +class PatchWelcomeError(FFmpegError): ... +class ProtocolNotFoundError(LookupError): ... +class UnknownError(FFmpegError): ... +class ExperimentalError(FFmpegError): ... +class InputChangedError(FFmpegError): ... +class OutputChangedError(FFmpegError): ... +class HTTPBadRequestError(HTTPClientError): ... +class HTTPUnauthorizedError(HTTPClientError): ... +class HTTPForbiddenError(HTTPClientError): ... +class HTTPNotFoundError(HTTPClientError): ... 
+class HTTPOtherClientError(HTTPClientError): ... +class HTTPServerError(HTTPError): ... +class PyAVCallbackError(FFmpegError, builtins.RuntimeError): ... +class BrokenPipeError(FFmpegError, builtins.BrokenPipeError): ... +class ChildProcessError(FFmpegError, builtins.ChildProcessError): ... +class ConnectionAbortedError(FFmpegError, builtins.ConnectionAbortedError): ... +class ConnectionRefusedError(FFmpegError, builtins.ConnectionRefusedError): ... +class ConnectionResetError(FFmpegError, builtins.ConnectionResetError): ... +class BlockingIOError(FFmpegError, builtins.BlockingIOError): ... +class EOFError(FFmpegError, builtins.EOFError): ... +class FileExistsError(FFmpegError, builtins.FileExistsError): ... +class FileNotFoundError(FFmpegError, builtins.FileNotFoundError): ... +class InterruptedError(FFmpegError, builtins.InterruptedError): ... +class IsADirectoryError(FFmpegError, builtins.IsADirectoryError): ... +class MemoryError(FFmpegError, builtins.MemoryError): ... +class NotADirectoryError(FFmpegError, builtins.NotADirectoryError): ... +class NotImplementedError(FFmpegError, builtins.NotImplementedError): ... +class OverflowError(FFmpegError, builtins.OverflowError): ... +class OSError(FFmpegError, builtins.OSError): ... +class PermissionError(FFmpegError, builtins.PermissionError): ... +class ProcessLookupError(FFmpegError, builtins.ProcessLookupError): ... +class TimeoutError(FFmpegError, builtins.TimeoutError): ... +class ValueError(FFmpegError, builtins.ValueError): ... diff --git a/lib/python3.10/site-packages/av/error.pyx b/lib/python3.10/site-packages/av/error.pyx new file mode 100644 index 0000000000000000000000000000000000000000..b8b41520742988ad4a5434f9ab085d0149d270c8 --- /dev/null +++ b/lib/python3.10/site-packages/av/error.pyx @@ -0,0 +1,430 @@ +cimport libav as lib +from libc.stdlib cimport free, malloc + +from av.logging cimport get_last_error + +import errno +import os +import sys +import traceback +from threading import local + +# Will get extended with all of the exceptions. +__all__ = [ + "ErrorType", "FFmpegError", "LookupError", "HTTPError", "HTTPClientError", + "UndefinedError", +] + + +cpdef code_to_tag(int code): + """Convert an integer error code into 4-byte tag. + + >>> code_to_tag(1953719668) + b'test' + + """ + return bytes(( + code & 0xff, + (code >> 8) & 0xff, + (code >> 16) & 0xff, + (code >> 24) & 0xff, + )) + +cpdef tag_to_code(bytes tag): + """Convert a 4-byte error tag into an integer code. + + >>> tag_to_code(b'test') + 1953719668 + + """ + if len(tag) != 4: + raise ValueError("Error tags are 4 bytes.") + return ( + (tag[0]) + + (tag[1] << 8) + + (tag[2] << 16) + + (tag[3] << 24) + ) + + +class FFmpegError(Exception): + """Exception class for errors from within FFmpeg. + + .. attribute:: errno + + FFmpeg's integer error code. + + .. attribute:: strerror + + FFmpeg's error message. + + .. attribute:: filename + + The filename that was being operated on (if available). + + .. attribute:: log + + The tuple from :func:`av.logging.get_last_log`, or ``None``. + + """ + + def __init__(self, code, message, filename=None, log=None): + self.errno = code + self.strerror = message + + args = [code, message] + if filename or log: + args.append(filename) + if log: + args.append(log) + super(FFmpegError, self).__init__(*args) + self.args = tuple(args) # FileNotFoundError/etc. only pulls 2 args. 
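A minimal sketch of how this exception surface is meant to be consumed (the path below is a hypothetical, nonexistent file): errno, strerror and filename mirror the OSError convention, while log carries the matching FFmpeg log entry when one was captured.

import av

try:
    av.open("/no/such/file.mkv")        # hypothetical, nonexistent path
except av.FFmpegError as exc:           # here a subclass such as av.FileNotFoundError
    print(exc.errno, exc.strerror, exc.filename, exc.log)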
+ + @property + def filename(self): + try: + return self.args[2] + except IndexError: + pass + + @property + def log(self): + try: + return self.args[3] + except IndexError: + pass + + def __str__(self): + msg = "" + if self.errno is not None: + msg = f"{msg}[Errno {self.errno}] " + if self.strerror is not None: + msg = f"{msg}{self.strerror}" + if self.filename: + msg = f"{msg}: {self.filename!r}" + if self.log: + msg = f"{msg}; last error log: [{self.log[1].strip()}] {self.log[2].strip()}" + + return msg + + +# Our custom error, used in callbacks. +cdef int c_PYAV_STASHED_ERROR = tag_to_code(b"PyAV") +cdef str PYAV_STASHED_ERROR_message = "Error in PyAV callback" + + +# Bases for the FFmpeg-based exceptions. +class LookupError(FFmpegError, LookupError): + pass + + +class HTTPError(FFmpegError): + pass + + +class HTTPClientError(FFmpegError): + pass + + +# Tuples of (enum_name, enum_value, exc_name, exc_base). +_ffmpeg_specs = ( + ("BSF_NOT_FOUND", -lib.AVERROR_BSF_NOT_FOUND, "BSFNotFoundError", LookupError), + ("BUG", -lib.AVERROR_BUG, None, RuntimeError), + ("BUFFER_TOO_SMALL", -lib.AVERROR_BUFFER_TOO_SMALL, None, ValueError), + ("DECODER_NOT_FOUND", -lib.AVERROR_DECODER_NOT_FOUND, None, LookupError), + ("DEMUXER_NOT_FOUND", -lib.AVERROR_DEMUXER_NOT_FOUND, None, LookupError), + ("ENCODER_NOT_FOUND", -lib.AVERROR_ENCODER_NOT_FOUND, None, LookupError), + ("EOF", -lib.AVERROR_EOF, "EOFError", EOFError), + ("EXIT", -lib.AVERROR_EXIT, None, None), + ("EXTERNAL", -lib.AVERROR_EXTERNAL, None, None), + ("FILTER_NOT_FOUND", -lib.AVERROR_FILTER_NOT_FOUND, None, LookupError), + ("INVALIDDATA", -lib.AVERROR_INVALIDDATA, "InvalidDataError", ValueError), + ("MUXER_NOT_FOUND", -lib.AVERROR_MUXER_NOT_FOUND, None, LookupError), + ("OPTION_NOT_FOUND", -lib.AVERROR_OPTION_NOT_FOUND, None, LookupError), + ("PATCHWELCOME", -lib.AVERROR_PATCHWELCOME, "PatchWelcomeError", None), + ("PROTOCOL_NOT_FOUND", -lib.AVERROR_PROTOCOL_NOT_FOUND, None, LookupError), + ("UNKNOWN", -lib.AVERROR_UNKNOWN, None, None), + ("EXPERIMENTAL", -lib.AVERROR_EXPERIMENTAL, None, None), + ("INPUT_CHANGED", -lib.AVERROR_INPUT_CHANGED, None, None), + ("OUTPUT_CHANGED", -lib.AVERROR_OUTPUT_CHANGED, None, None), + ("HTTP_BAD_REQUEST", -lib.AVERROR_HTTP_BAD_REQUEST, "HTTPBadRequestError", HTTPClientError), + ("HTTP_UNAUTHORIZED", -lib.AVERROR_HTTP_UNAUTHORIZED, "HTTPUnauthorizedError", HTTPClientError), + ("HTTP_FORBIDDEN", -lib.AVERROR_HTTP_FORBIDDEN, "HTTPForbiddenError", HTTPClientError), + ("HTTP_NOT_FOUND", -lib.AVERROR_HTTP_NOT_FOUND, "HTTPNotFoundError", HTTPClientError), + ("HTTP_OTHER_4XX", -lib.AVERROR_HTTP_OTHER_4XX, "HTTPOtherClientError", HTTPClientError), + ("HTTP_SERVER_ERROR", -lib.AVERROR_HTTP_SERVER_ERROR, "HTTPServerError", HTTPError), + ("PYAV_CALLBACK", c_PYAV_STASHED_ERROR, "PyAVCallbackError", RuntimeError), +) + +cdef sentinel = object() + + +class EnumType(type): + def __new__(mcl, name, bases, attrs, *args): + # Just adapting the method signature. + return super().__new__(mcl, name, bases, attrs) + + def __init__(self, name, bases, attrs, items): + self._by_name = {} + self._by_value = {} + self._all = [] + + for spec in items: + self._create(*spec) + + def _create(self, name, value, doc=None, by_value_only=False): + # We only have one instance per value. 
+ try: + item = self._by_value[value] + except KeyError: + item = self(sentinel, name, value, doc) + self._by_value[value] = item + + return item + + def __len__(self): + return len(self._all) + + def __iter__(self): + return iter(self._all) + + def __getitem__(self, key): + if isinstance(key, str): + return self._by_name[key] + + if isinstance(key, int): + try: + return self._by_value[key] + except KeyError: + pass + + raise KeyError(key) + + if isinstance(key, self): + return key + + raise TypeError(f"{self.__name__} indices must be str, int, or itself") + + def _get(self, long value, bint create=False): + try: + return self._by_value[value] + except KeyError: + pass + + if not create: + return + + return self._create(f"{self.__name__.upper()}_{value}", value, by_value_only=True) + + def get(self, key, default=None, create=False): + try: + return self[key] + except KeyError: + if create: + return self._get(key, create=True) + return default + + +cdef class EnumItem: + """An enumeration of FFmpeg's error types. + +.. attribute:: tag + + The FFmpeg byte tag for the error. + +.. attribute:: strerror + + The error message that would be returned. + +""" + cdef readonly str name + cdef readonly int value + + def __cinit__(self, sentinel_, str name, int value, doc=None): + if sentinel_ is not sentinel: + raise RuntimeError(f"Cannot instantiate {self.__class__.__name__}.") + + self.name = name + self.value = value + self.__doc__ = doc + + def __repr__(self): + return f"<{self.__class__.__module__}.{self.__class__.__name__}:{self.name}(0x{self.value:x})>" + + def __str__(self): + return self.name + + def __int__(self): + return self.value + + @property + def tag(self): + return code_to_tag(self.value) + + +ErrorType = EnumType("ErrorType", (EnumItem, ), {"__module__": __name__}, [x[:2] for x in _ffmpeg_specs]) + + +for enum in ErrorType: + # Mimick the errno module. + globals()[enum.name] = enum + if enum.value == c_PYAV_STASHED_ERROR: + enum.strerror = PYAV_STASHED_ERROR_message + else: + enum.strerror = lib.av_err2str(-enum.value) + + +# Mimick the builtin exception types. +# See https://www.python.org/dev/peps/pep-3151/#new-exception-classes +# Use the named ones we have, otherwise default to OSError for anything in errno. + +# See this command for the count of POSIX codes used: +# +# egrep -IR 'AVERROR\(E[A-Z]+\)' vendor/ffmpeg-4.2 |\ +# sed -E 's/.*AVERROR\((E[A-Z]+)\).*/\1/' | \ +# sort | uniq -c +# +# The biggest ones that don't map to PEP 3151 builtins: +# +# 2106 EINVAL -> ValueError +# 649 EIO -> IOError (if it is distinct from OSError) +# 4080 ENOMEM -> MemoryError +# 340 ENOSYS -> NotImplementedError +# 35 ERANGE -> OverflowError + +classes = {} + + +def _extend_builtin(name, codes): + base = getattr(__builtins__, name, OSError) + cls = type(name, (FFmpegError, base), dict(__module__=__name__)) + + # Register in builder. + for code in codes: + classes[code] = cls + + # Register in module. + globals()[name] = cls + __all__.append(name) + + return cls + + +# PEP 3151 builtins. 
+_extend_builtin("PermissionError", (errno.EACCES, errno.EPERM)) +_extend_builtin("BlockingIOError", (errno.EAGAIN, errno.EALREADY, errno.EINPROGRESS, errno.EWOULDBLOCK)) +_extend_builtin("ChildProcessError", (errno.ECHILD, )) +_extend_builtin("ConnectionAbortedError", (errno.ECONNABORTED, )) +_extend_builtin("ConnectionRefusedError", (errno.ECONNREFUSED, )) +_extend_builtin("ConnectionResetError", (errno.ECONNRESET, )) +_extend_builtin("FileExistsError", (errno.EEXIST, )) +_extend_builtin("InterruptedError", (errno.EINTR, )) +_extend_builtin("IsADirectoryError", (errno.EISDIR, )) +_extend_builtin("FileNotFoundError", (errno.ENOENT, )) +_extend_builtin("NotADirectoryError", (errno.ENOTDIR, )) +_extend_builtin("BrokenPipeError", (errno.EPIPE, errno.ESHUTDOWN)) +_extend_builtin("ProcessLookupError", (errno.ESRCH, )) +_extend_builtin("TimeoutError", (errno.ETIMEDOUT, )) + +# Other obvious ones. +_extend_builtin("ValueError", (errno.EINVAL, )) +_extend_builtin("MemoryError", (errno.ENOMEM, )) +_extend_builtin("NotImplementedError", (errno.ENOSYS, )) +_extend_builtin("OverflowError", (errno.ERANGE, )) + +# The rest of them (for now) +_extend_builtin("OSError", [code for code in errno.errorcode if code not in classes]) + +# Classes for the FFmpeg errors. +for enum_name, code, name, base in _ffmpeg_specs: + name = name or enum_name.title().replace("_", "") + "Error" + + if base is None: + bases = (FFmpegError,) + elif issubclass(base, FFmpegError): + bases = (base,) + else: + bases = (FFmpegError, base) + + cls = type(name, bases, {"__module__": __name__}) + + # Register in builder. + classes[code] = cls + + # Register in module. + globals()[name] = cls + __all__.append(name) + +del _ffmpeg_specs + + +# Storage for stashing. +cdef object _local = local() +cdef int _err_count = 0 + +cdef int stash_exception(exc_info=None): + global _err_count + + existing = getattr(_local, "exc_info", None) + if existing is not None: + print >> sys.stderr, "PyAV library exception being dropped:" + traceback.print_exception(*existing) + _err_count -= 1 # Balance out the +=1 that is coming. + + exc_info = exc_info or sys.exc_info() + _local.exc_info = exc_info + if exc_info: + _err_count += 1 + + return -c_PYAV_STASHED_ERROR + + +cdef int _last_log_count = 0 + +cpdef int err_check(int res, filename=None) except -1: + """Raise appropriate exceptions from library return code.""" + + global _err_count + global _last_log_count + + # Check for stashed exceptions. + if _err_count: + exc_info = getattr(_local, "exc_info", None) + if exc_info is not None: + _err_count -= 1 + _local.exc_info = None + raise exc_info[0], exc_info[1], exc_info[2] + + if res >= 0: + return res + + # Grab details from the last log. 
+ log_count, last_log = get_last_error() + if log_count > _last_log_count: + _last_log_count = log_count + log = last_log + else: + log = None + + cdef int code = -res + cdef char* error_buffer = malloc(lib.AV_ERROR_MAX_STRING_SIZE * sizeof(char)) + if error_buffer == NULL: + raise MemoryError() + + try: + if code == c_PYAV_STASHED_ERROR: + message = PYAV_STASHED_ERROR_message + else: + lib.av_strerror(res, error_buffer, lib.AV_ERROR_MAX_STRING_SIZE) + # Fallback to OS error string if no message + message = error_buffer or os.strerror(code) + + cls = classes.get(code, UndefinedError) + raise cls(code, message, filename, log) + finally: + free(error_buffer) + + +class UndefinedError(FFmpegError): + """Fallback exception type in case FFmpeg returns an error we don't know about.""" + pass diff --git a/lib/python3.10/site-packages/av/format.pyx b/lib/python3.10/site-packages/av/format.pyx new file mode 100644 index 0000000000000000000000000000000000000000..464e34f492eda6f5f86a583ed2a1d2d10bbe67ff --- /dev/null +++ b/lib/python3.10/site-packages/av/format.pyx @@ -0,0 +1,170 @@ +cimport libav as lib + +from av.descriptor cimport wrap_avclass + +from enum import Flag + + +cdef object _cinit_bypass_sentinel = object() + +cdef ContainerFormat build_container_format(lib.AVInputFormat* iptr, lib.AVOutputFormat* optr): + if not iptr and not optr: + raise ValueError("needs input format or output format") + cdef ContainerFormat format = ContainerFormat.__new__(ContainerFormat, _cinit_bypass_sentinel) + format.iptr = iptr + format.optr = optr + format.name = optr.name if optr else iptr.name + return format + + +class Flags(Flag): + no_file = lib.AVFMT_NOFILE + need_number: "Needs '%d' in filename." = lib.AVFMT_NEEDNUMBER + show_ids: "Show format stream IDs numbers." = lib.AVFMT_SHOW_IDS + global_header: "Format wants global header." = lib.AVFMT_GLOBALHEADER + no_timestamps: "Format does not need / have any timestamps." = lib.AVFMT_NOTIMESTAMPS + generic_index: "Use generic index building code." = lib.AVFMT_GENERIC_INDEX + ts_discont: "Format allows timestamp discontinuities" = lib.AVFMT_TS_DISCONT + variable_fps: "Format allows variable fps." = lib.AVFMT_VARIABLE_FPS + no_dimensions: "Format does not need width/height" = lib.AVFMT_NODIMENSIONS + no_streams: "Format does not require any streams" = lib.AVFMT_NOSTREAMS + no_bin_search: "Format does not allow to fall back on binary search via read_timestamp" = lib.AVFMT_NOBINSEARCH + no_gen_search: "Format does not allow to fall back on generic search" = lib.AVFMT_NOGENSEARCH + no_byte_seek: "Format does not allow seeking by bytes" = lib.AVFMT_NO_BYTE_SEEK + allow_flush: "Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function." = lib.AVFMT_ALLOW_FLUSH + ts_nonstrict: "Format does not require strictly increasing timestamps, but they must still be monotonic." = lib.AVFMT_TS_NONSTRICT + ts_negative: "Format allows muxing negative timestamps." = lib.AVFMT_TS_NEGATIVE + # If not set the timestamp will be shifted in `av_write_frame()` and `av_interleaved_write_frame()` + # so they start from 0. The user or muxer can override this through AVFormatContext.avoid_negative_ts + seek_to_pts: "Seeking is based on PTS" = lib.AVFMT_SEEK_TO_PTS + +cdef class ContainerFormat: + """Descriptor of a container format. + + :param str name: The name of the format. + :param str mode: ``'r'`` or ``'w'`` for input and output formats; defaults + to None which will grab either. 
+ + """ + + def __cinit__(self, name, mode=None): + if name is _cinit_bypass_sentinel: + return + + # We need to hold onto the original name because AVInputFormat.name is + # actually comma-separated, and so we need to remember which one this was. + self.name = name + + # Searches comma-separated names. + if mode is None or mode == "r": + self.iptr = lib.av_find_input_format(name) + + if mode is None or mode == "w": + self.optr = lib.av_guess_format(name, NULL, NULL) + + if not self.iptr and not self.optr: + raise ValueError(f"no container format {name!r}") + + def __repr__(self): + return f"" + + @property + def descriptor(self): + if self.iptr: + return wrap_avclass(self.iptr.priv_class) + else: + return wrap_avclass(self.optr.priv_class) + + @property + def options(self): + return self.descriptor.options + + @property + def input(self): + """An input-only view of this format.""" + if self.iptr == NULL: + return None + elif self.optr == NULL: + return self + else: + return build_container_format(self.iptr, NULL) + + @property + def output(self): + """An output-only view of this format.""" + if self.optr == NULL: + return None + elif self.iptr == NULL: + return self + else: + return build_container_format(NULL, self.optr) + + @property + def is_input(self): + return self.iptr != NULL + + @property + def is_output(self): + return self.optr != NULL + + @property + def long_name(self): + # We prefer the output names since the inputs may represent + # multiple formats. + return self.optr.long_name if self.optr else self.iptr.long_name + + @property + def extensions(self): + cdef set exts = set() + if self.iptr and self.iptr.extensions: + exts.update(self.iptr.extensions.split(",")) + if self.optr and self.optr.extensions: + exts.update(self.optr.extensions.split(",")) + return exts + + @property + def flags(self): + """ + Get the flags bitmask for the format. + + :rtype: int + """ + return ( + (self.iptr.flags if self.iptr else 0) | + (self.optr.flags if self.optr else 0) + ) + + @property + def no_file(self): + return bool(self.flags & lib.AVFMT_NOFILE) + + +cdef get_output_format_names(): + names = set() + cdef const lib.AVOutputFormat *ptr + cdef void *opaque = NULL + while True: + ptr = lib.av_muxer_iterate(&opaque) + if ptr: + names.add(ptr.name) + else: + break + return names + +cdef get_input_format_names(): + names = set() + cdef const lib.AVInputFormat *ptr + cdef void *opaque = NULL + while True: + ptr = lib.av_demuxer_iterate(&opaque) + if ptr: + names.add(ptr.name) + else: + break + return names + +formats_available = get_output_format_names() +formats_available.update(get_input_format_names()) + + +format_descriptor = wrap_avclass(lib.avformat_get_class()) diff --git a/lib/python3.10/site-packages/av/frame.pxd b/lib/python3.10/site-packages/av/frame.pxd new file mode 100644 index 0000000000000000000000000000000000000000..6d7214b7caf4edae8f59075a32c805574ccdc514 --- /dev/null +++ b/lib/python3.10/site-packages/av/frame.pxd @@ -0,0 +1,14 @@ +cimport libav as lib + +from av.packet cimport Packet +from av.sidedata.sidedata cimport _SideDataContainer + + +cdef class Frame: + cdef lib.AVFrame *ptr + # We define our own time. + cdef lib.AVRational _time_base + cdef _rebase_time(self, lib.AVRational) + cdef _SideDataContainer _side_data + cdef _copy_internal_attributes(self, Frame source, bint data_layout=?) 
+ cdef _init_user_attributes(self) diff --git a/lib/python3.10/site-packages/av/logging.pxd b/lib/python3.10/site-packages/av/logging.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a886a0f206e07928b36ff0b4c3799409446e27bd --- /dev/null +++ b/lib/python3.10/site-packages/av/logging.pxd @@ -0,0 +1,2 @@ + +cpdef get_last_error() diff --git a/lib/python3.10/site-packages/av/logging.pyi b/lib/python3.10/site-packages/av/logging.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8c32de77d21c75ca4f38fbd78ecb5d7c2871c2d6 --- /dev/null +++ b/lib/python3.10/site-packages/av/logging.pyi @@ -0,0 +1,33 @@ +from typing import Any, Callable + +PANIC: int +FATAL: int +ERROR: int +WARNING: int +INFO: int +VERBOSE: int +DEBUG: int +TRACE: int +CRITICAL: int + +def adapt_level(level: int) -> int: ... +def get_level() -> int | None: ... +def set_level(level: int | None) -> None: ... +def set_libav_level(level: int) -> None: ... +def restore_default_callback() -> None: ... +def get_skip_repeated() -> bool: ... +def set_skip_repeated(v: bool) -> None: ... +def get_last_error() -> tuple[int, tuple[int, str, str] | None]: ... +def log(level: int, name: str, message: str) -> None: ... + +class Capture: + logs: list[tuple[int, str, str]] + + def __init__(self, local: bool = True) -> None: ... + def __enter__(self) -> list[tuple[int, str, str]]: ... + def __exit__( + self, + type_: type | None, + value: Exception | None, + traceback: Callable[..., Any] | None, + ) -> None: ... diff --git a/lib/python3.10/site-packages/av/opaque.pxd b/lib/python3.10/site-packages/av/opaque.pxd new file mode 100644 index 0000000000000000000000000000000000000000..f5c38d7fab9deb147f679b66755902bba63a9603 --- /dev/null +++ b/lib/python3.10/site-packages/av/opaque.pxd @@ -0,0 +1,12 @@ +cimport libav as lib + + +cdef class OpaqueContainer: + cdef dict _by_name + + cdef lib.AVBufferRef *add(self, object v) + cdef object get(self, bytes name) + cdef object pop(self, bytes name) + + +cdef OpaqueContainer opaque_container diff --git a/lib/python3.10/site-packages/av/option.pxd b/lib/python3.10/site-packages/av/option.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9087b811c06e3dae2a25ce8557d37e6f7a67c7d3 --- /dev/null +++ b/lib/python3.10/site-packages/av/option.pxd @@ -0,0 +1,21 @@ +cimport libav as lib + + +cdef class BaseOption: + + cdef const lib.AVOption *ptr + + +cdef class Option(BaseOption): + + cdef readonly tuple choices + + +cdef class OptionChoice(BaseOption): + + cdef readonly bint is_default + + +cdef Option wrap_option(tuple choices, const lib.AVOption *ptr) + +cdef OptionChoice wrap_option_choice(const lib.AVOption *ptr, bint is_default) diff --git a/lib/python3.10/site-packages/av/option.pyi b/lib/python3.10/site-packages/av/option.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f989a1138d248a63a33916cfd5abf6c4505e28cf --- /dev/null +++ b/lib/python3.10/site-packages/av/option.pyi @@ -0,0 +1,55 @@ +from enum import Enum, Flag +from typing import cast + +class OptionType(Enum): + FLAGS = cast(int, ...) + INT = cast(int, ...) + INT64 = cast(int, ...) + DOUBLE = cast(int, ...) + FLOAT = cast(int, ...) + STRING = cast(int, ...) + RATIONAL = cast(int, ...) + BINARY = cast(int, ...) + DICT = cast(int, ...) + CONST = cast(int, ...) + IMAGE_SIZE = cast(int, ...) + PIXEL_FMT = cast(int, ...) + SAMPLE_FMT = cast(int, ...) + VIDEO_RATE = cast(int, ...) + DURATION = cast(int, ...) + COLOR = cast(int, ...) + CHANNEL_LAYOUT = cast(int, ...) 
+ BOOL = cast(int, ...) + +class OptionFlags(Flag): + ENCODING_PARAM = cast(int, ...) + DECODING_PARAM = cast(int, ...) + AUDIO_PARAM = cast(int, ...) + VIDEO_PARAM = cast(int, ...) + SUBTITLE_PARAM = cast(int, ...) + EXPORT = cast(int, ...) + READONLY = cast(int, ...) + FILTERING_PARAM = cast(int, ...) + +class BaseOption: + name: str + help: str + flags: int + is_encoding_param: bool + is_decoding_param: bool + is_audio_param: bool + is_video_param: bool + is_subtitle_param: bool + is_export: bool + is_readonly: bool + is_filtering_param: bool + +class Option(BaseOption): + type: OptionType + offset: int + default: int + min: int + max: int + +class OptionChoice(BaseOption): + value: int diff --git a/lib/python3.10/site-packages/av/option.pyx b/lib/python3.10/site-packages/av/option.pyx new file mode 100644 index 0000000000000000000000000000000000000000..e58c4c13f1507d0acd77e3366abece38bd5728e0 --- /dev/null +++ b/lib/python3.10/site-packages/av/option.pyx @@ -0,0 +1,172 @@ +cimport libav as lib + +from av.utils cimport flag_in_bitfield + +from enum import Enum, Flag + + +cdef object _cinit_sentinel = object() + +cdef Option wrap_option(tuple choices, const lib.AVOption *ptr): + if ptr == NULL: + return None + cdef Option obj = Option(_cinit_sentinel) + obj.ptr = ptr + obj.choices = choices + return obj + + +class OptionType(Enum): + FLAGS = lib.AV_OPT_TYPE_FLAGS + INT = lib.AV_OPT_TYPE_INT + INT64 = lib.AV_OPT_TYPE_INT64 + DOUBLE = lib.AV_OPT_TYPE_DOUBLE + FLOAT = lib.AV_OPT_TYPE_FLOAT + STRING = lib.AV_OPT_TYPE_STRING + RATIONAL = lib.AV_OPT_TYPE_RATIONAL + BINARY = lib.AV_OPT_TYPE_BINARY + DICT = lib.AV_OPT_TYPE_DICT + UINT64 = lib.AV_OPT_TYPE_UINT64 + CONST = lib.AV_OPT_TYPE_CONST + IMAGE_SIZE = lib.AV_OPT_TYPE_IMAGE_SIZE + PIXEL_FMT = lib.AV_OPT_TYPE_PIXEL_FMT + SAMPLE_FMT = lib.AV_OPT_TYPE_SAMPLE_FMT + VIDEO_RATE = lib.AV_OPT_TYPE_VIDEO_RATE + DURATION = lib.AV_OPT_TYPE_DURATION + COLOR = lib.AV_OPT_TYPE_COLOR + CHANNEL_LAYOUT = lib.AV_OPT_TYPE_CHLAYOUT + BOOL = lib.AV_OPT_TYPE_BOOL + +cdef tuple _INT_TYPES = ( + lib.AV_OPT_TYPE_FLAGS, + lib.AV_OPT_TYPE_INT, + lib.AV_OPT_TYPE_INT64, + lib.AV_OPT_TYPE_PIXEL_FMT, + lib.AV_OPT_TYPE_SAMPLE_FMT, + lib.AV_OPT_TYPE_DURATION, + lib.AV_OPT_TYPE_CHLAYOUT, + lib.AV_OPT_TYPE_BOOL, +) + +class OptionFlags(Flag): + ENCODING_PARAM = lib.AV_OPT_FLAG_ENCODING_PARAM + DECODING_PARAM = lib.AV_OPT_FLAG_DECODING_PARAM + AUDIO_PARAM = lib.AV_OPT_FLAG_AUDIO_PARAM + VIDEO_PARAM = lib.AV_OPT_FLAG_VIDEO_PARAM + SUBTITLE_PARAM = lib.AV_OPT_FLAG_SUBTITLE_PARAM + EXPORT = lib.AV_OPT_FLAG_EXPORT + READONLY = lib.AV_OPT_FLAG_READONLY + FILTERING_PARAM = lib.AV_OPT_FLAG_FILTERING_PARAM + + +cdef class BaseOption: + def __cinit__(self, sentinel): + if sentinel is not _cinit_sentinel: + raise RuntimeError(f"Cannot construct av.{self.__class__.__name__}") + + @property + def name(self): + return self.ptr.name + + @property + def help(self): + return self.ptr.help if self.ptr.help != NULL else "" + + @property + def flags(self): + return self.ptr.flags + + # Option flags + @property + def is_encoding_param(self): + return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_ENCODING_PARAM) + @property + def is_decoding_param(self): + return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_DECODING_PARAM) + @property + def is_audio_param(self): + return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_AUDIO_PARAM) + @property + def is_video_param(self): + return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_VIDEO_PARAM) + @property + def is_subtitle_param(self): + 
return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_SUBTITLE_PARAM) + @property + def is_export(self): + return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_EXPORT) + @property + def is_readonly(self): + return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_READONLY) + @property + def is_filtering_param(self): + return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_FILTERING_PARAM) + + +cdef class Option(BaseOption): + @property + def type(self): + return OptionType(self.ptr.type) + + @property + def offset(self): + """ + This can be used to find aliases of an option. + Options in a particular descriptor with the same offset are aliases. + """ + return self.ptr.offset + + @property + def default(self): + if self.ptr.type in _INT_TYPES: + return self.ptr.default_val.i64 + if self.ptr.type in (lib.AV_OPT_TYPE_DOUBLE, lib.AV_OPT_TYPE_FLOAT, + lib.AV_OPT_TYPE_RATIONAL): + return self.ptr.default_val.dbl + if self.ptr.type in (lib.AV_OPT_TYPE_STRING, lib.AV_OPT_TYPE_BINARY, + lib.AV_OPT_TYPE_IMAGE_SIZE, lib.AV_OPT_TYPE_VIDEO_RATE, + lib.AV_OPT_TYPE_COLOR): + return self.ptr.default_val.str if self.ptr.default_val.str != NULL else "" + + def _norm_range(self, value): + if self.ptr.type in _INT_TYPES: + return int(value) + return value + + @property + def min(self): + return self._norm_range(self.ptr.min) + + @property + def max(self): + return self._norm_range(self.ptr.max) + + def __repr__(self): + return ( + f"" + ) + + +cdef OptionChoice wrap_option_choice(const lib.AVOption *ptr, bint is_default): + if ptr == NULL: + return None + + cdef OptionChoice obj = OptionChoice(_cinit_sentinel) + obj.ptr = ptr + obj.is_default = is_default + return obj + + +cdef class OptionChoice(BaseOption): + """ + Represents AV_OPT_TYPE_CONST options which are essentially + choices of non-const option with same unit. + """ + + @property + def value(self): + return self.ptr.default_val.i64 + + def __repr__(self): + return f"" diff --git a/lib/python3.10/site-packages/av/packet.pxd b/lib/python3.10/site-packages/av/packet.pxd new file mode 100644 index 0000000000000000000000000000000000000000..ca21e6b76dcf50c86e9c4a79a4ddf4e64410e04d --- /dev/null +++ b/lib/python3.10/site-packages/av/packet.pxd @@ -0,0 +1,21 @@ +cimport libav as lib + +from av.buffer cimport Buffer +from av.bytesource cimport ByteSource +from av.stream cimport Stream + + +cdef class Packet(Buffer): + + cdef lib.AVPacket* ptr + + cdef Stream _stream + + # We track our own time. + cdef lib.AVRational _time_base + cdef _rebase_time(self, lib.AVRational) + + # Hold onto the original reference. + cdef ByteSource source + cdef size_t _buffer_size(self) + cdef void* _buffer_ptr(self) diff --git a/lib/python3.10/site-packages/av/packet.pyi b/lib/python3.10/site-packages/av/packet.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9bdbb8c6236faae4fc926941d02c6c5e1b3fd6c4 --- /dev/null +++ b/lib/python3.10/site-packages/av/packet.pyi @@ -0,0 +1,25 @@ +from fractions import Fraction + +from av.subtitles.subtitle import SubtitleSet + +from .buffer import Buffer +from .stream import Stream + +class Packet(Buffer): + stream: Stream + stream_index: int + time_base: Fraction + pts: int | None + dts: int + pos: int | None + size: int + duration: int | None + opaque: object + is_keyframe: bool + is_corrupt: bool + is_discard: bool + is_trusted: bool + is_disposable: bool + + def __init__(self, input: int | bytes | None = None) -> None: ... + def decode(self) -> list[SubtitleSet]: ... 
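A minimal demuxing sketch tying the Packet attributes above together; the input path is an assumption.

import av

with av.open("input.mp4") as container:           # hypothetical input file
    stream = container.streams.video[0]
    for packet in container.demux(stream):
        if packet.dts is None:                    # the demuxer ends with a flush packet
            continue
        print(packet.pts, packet.dts, packet.duration, packet.time_base, packet.is_keyframe)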
diff --git a/lib/python3.10/site-packages/av/plane.pyx b/lib/python3.10/site-packages/av/plane.pyx new file mode 100644 index 0000000000000000000000000000000000000000..c733b20a72e050fead8cba9fa361c1b08a09de24 --- /dev/null +++ b/lib/python3.10/site-packages/av/plane.pyx @@ -0,0 +1,20 @@ + +cdef class Plane(Buffer): + """ + Base class for audio and video planes. + + See also :class:`~av.audio.plane.AudioPlane` and :class:`~av.video.plane.VideoPlane`. + """ + + def __cinit__(self, Frame frame, int index): + self.frame = frame + self.index = index + + def __repr__(self): + return ( + f"" + ) + + cdef void* _buffer_ptr(self): + return self.frame.ptr.extended_data[self.index] diff --git a/lib/python3.10/site-packages/av/py.typed b/lib/python3.10/site-packages/av/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/av/stream.pxd b/lib/python3.10/site-packages/av/stream.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c847f641e8a3602ffd0fc65d3440a964dd53f381 --- /dev/null +++ b/lib/python3.10/site-packages/av/stream.pxd @@ -0,0 +1,26 @@ +cimport libav as lib + +from av.codec.context cimport CodecContext +from av.container.core cimport Container +from av.frame cimport Frame +from av.packet cimport Packet + + +cdef class Stream: + cdef lib.AVStream *ptr + + # Stream attributes. + cdef readonly Container container + cdef readonly dict metadata + + # CodecContext attributes. + cdef readonly CodecContext codec_context + + # Private API. + cdef _init(self, Container, lib.AVStream*, CodecContext) + cdef _finalize_for_output(self) + cdef _set_time_base(self, value) + cdef _set_id(self, value) + + +cdef Stream wrap_stream(Container, lib.AVStream*, CodecContext) diff --git a/lib/python3.10/site-packages/av/stream.pyi b/lib/python3.10/site-packages/av/stream.pyi new file mode 100644 index 0000000000000000000000000000000000000000..88dc7c00b875a5f985b3099a28556ba8f1eef973 --- /dev/null +++ b/lib/python3.10/site-packages/av/stream.pyi @@ -0,0 +1,48 @@ +from enum import Flag +from fractions import Fraction +from typing import Literal, cast + +from .codec import Codec, CodecContext +from .container import Container + +class Disposition(Flag): + default = cast(int, ...) + dub = cast(int, ...) + original = cast(int, ...) + comment = cast(int, ...) + lyrics = cast(int, ...) + karaoke = cast(int, ...) + forced = cast(int, ...) + hearing_impaired = cast(int, ...) + visual_impaired = cast(int, ...) + clean_effects = cast(int, ...) + attached_pic = cast(int, ...) + timed_thumbnails = cast(int, ...) + non_diegetic = cast(int, ...) + captions = cast(int, ...) + descriptions = cast(int, ...) + metadata = cast(int, ...) + dependent = cast(int, ...) + still_image = cast(int, ...) + multilayer = cast(int, ...) 
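A brief sketch of how the Disposition flag above surfaces in practice, via the disposition property that av/stream.pyx defines further below; the input path is an assumption.

import av
from av.stream import Disposition

with av.open("movie.mkv") as container:           # hypothetical input file
    for stream in container.streams:
        # Disposition is a Flag, so individual bits can be tested with "&".
        if stream.disposition & Disposition.default:
            print(stream.index, stream.type, "is flagged as a default stream")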
+ +class Stream: + name: str | None + container: Container + codec: Codec + codec_context: CodecContext + metadata: dict[str, str] + id: int + profiles: list[str] + profile: str | None + index: int + time_base: Fraction | None + average_rate: Fraction | None + base_rate: Fraction | None + guessed_rate: Fraction | None + start_time: int | None + duration: int | None + disposition: Disposition + frames: int + language: str | None + type: Literal["video", "audio", "data", "subtitle", "attachment"] diff --git a/lib/python3.10/site-packages/av/stream.pyx b/lib/python3.10/site-packages/av/stream.pyx new file mode 100644 index 0000000000000000000000000000000000000000..90f10d038288ec9e90b11cc436bad9e482dde0f5 --- /dev/null +++ b/lib/python3.10/site-packages/av/stream.pyx @@ -0,0 +1,269 @@ +cimport libav as lib + +from enum import Flag + +from av.error cimport err_check +from av.packet cimport Packet +from av.utils cimport ( + avdict_to_dict, + avrational_to_fraction, + dict_to_avdict, + to_avrational, +) + + +class Disposition(Flag): + default = 1 << 0 + dub = 1 << 1 + original = 1 << 2 + comment = 1 << 3 + lyrics = 1 << 4 + karaoke = 1 << 5 + forced = 1 << 6 + hearing_impaired = 1 << 7 + visual_impaired = 1 << 8 + clean_effects = 1 << 9 + attached_pic = 1 << 10 + timed_thumbnails = 1 << 11 + non_diegetic = 1 << 12 + captions = 1 << 16 + descriptions = 1 << 17 + metadata = 1 << 18 + dependent = 1 << 19 + still_image = 1 << 20 + multilayer = 1 << 21 + + +cdef object _cinit_bypass_sentinel = object() + +cdef Stream wrap_stream(Container container, lib.AVStream *c_stream, CodecContext codec_context): + """Build an av.Stream for an existing AVStream. + + The AVStream MUST be fully constructed and ready for use before this is + called. + + """ + + # This better be the right one... + assert container.ptr.streams[c_stream.index] == c_stream + + cdef Stream py_stream + + if c_stream.codecpar.codec_type == lib.AVMEDIA_TYPE_VIDEO: + from av.video.stream import VideoStream + py_stream = VideoStream.__new__(VideoStream, _cinit_bypass_sentinel) + elif c_stream.codecpar.codec_type == lib.AVMEDIA_TYPE_AUDIO: + from av.audio.stream import AudioStream + py_stream = AudioStream.__new__(AudioStream, _cinit_bypass_sentinel) + elif c_stream.codecpar.codec_type == lib.AVMEDIA_TYPE_SUBTITLE: + from av.subtitles.stream import SubtitleStream + py_stream = SubtitleStream.__new__(SubtitleStream, _cinit_bypass_sentinel) + elif c_stream.codecpar.codec_type == lib.AVMEDIA_TYPE_ATTACHMENT: + from av.attachments.stream import AttachmentStream + py_stream = AttachmentStream.__new__(AttachmentStream, _cinit_bypass_sentinel) + elif c_stream.codecpar.codec_type == lib.AVMEDIA_TYPE_DATA: + from av.data.stream import DataStream + py_stream = DataStream.__new__(DataStream, _cinit_bypass_sentinel) + else: + py_stream = Stream.__new__(Stream, _cinit_bypass_sentinel) + + py_stream._init(container, c_stream, codec_context) + return py_stream + + +cdef class Stream: + """ + A single stream of audio, video or subtitles within a :class:`.Container`. + + :: + + >>> fh = av.open(video_path) + >>> stream = fh.streams.video[0] + >>> stream + + + This encapsulates a :class:`.CodecContext`, located at :attr:`Stream.codec_context`. + Attribute access is passed through to that context when attributes are missing + on the stream itself. E.g. ``stream.options`` will be the options on the + context. 
+ """ + + def __cinit__(self, name): + if name is _cinit_bypass_sentinel: + return + raise RuntimeError("cannot manually instantiate Stream") + + cdef _init(self, Container container, lib.AVStream *stream, CodecContext codec_context): + self.container = container + self.ptr = stream + + self.codec_context = codec_context + if self.codec_context: + self.codec_context.stream_index = stream.index + + self.metadata = avdict_to_dict( + stream.metadata, + encoding=self.container.metadata_encoding, + errors=self.container.metadata_errors, + ) + + def __repr__(self): + name = getattr(self, "name", None) + return ( + f"'}/" + f"{name or ''} at 0x{id(self):x}>" + ) + + def __setattr__(self, name, value): + if name == "id": + self._set_id(value) + return + if name == "disposition": + self.ptr.disposition = value + return + + # Convenience setter for codec context properties. + if self.codec_context is not None: + setattr(self.codec_context, name, value) + + if name == "time_base": + self._set_time_base(value) + + cdef _finalize_for_output(self): + + dict_to_avdict( + &self.ptr.metadata, self.metadata, + encoding=self.container.metadata_encoding, + errors=self.container.metadata_errors, + ) + + if not self.ptr.time_base.num: + self.ptr.time_base = self.codec_context.ptr.time_base + + # It prefers if we pass it parameters via this other object. + # Lets just copy what we want. + err_check(lib.avcodec_parameters_from_context(self.ptr.codecpar, self.codec_context.ptr)) + + @property + def id(self): + """ + The format-specific ID of this stream. + + :type: int + + """ + return self.ptr.id + + cdef _set_id(self, value): + """ + Setter used by __setattr__ for the id property. + """ + if value is None: + self.ptr.id = 0 + else: + self.ptr.id = value + + @property + def profiles(self): + """ + List the available profiles for this stream. + + :type: list[str] + """ + if self.codec_context: + return self.codec_context.profiles + else: + return [] + + @property + def profile(self): + """ + The profile of this stream. + + :type: str + """ + if self.codec_context: + return self.codec_context.profile + else: + return None + + @property + def index(self): + """ + The index of this stream in its :class:`.Container`. + + :type: int + """ + return self.ptr.index + + + @property + def time_base(self): + """ + The unit of time (in fractional seconds) in which timestamps are expressed. + + :type: fractions.Fraction | None + + """ + return avrational_to_fraction(&self.ptr.time_base) + + cdef _set_time_base(self, value): + """ + Setter used by __setattr__ for the time_base property. + """ + to_avrational(value, &self.ptr.time_base) + + @property + def start_time(self): + """ + The presentation timestamp in :attr:`time_base` units of the first + frame in this stream. + + :type: int | None + """ + if self.ptr.start_time != lib.AV_NOPTS_VALUE: + return self.ptr.start_time + + @property + def duration(self): + """ + The duration of this stream in :attr:`time_base` units. + + :type: int | None + + """ + if self.ptr.duration != lib.AV_NOPTS_VALUE: + return self.ptr.duration + + @property + def frames(self): + """ + The number of frames this stream contains. + + Returns ``0`` if it is not known. + + :type: int + """ + return self.ptr.nb_frames + + @property + def language(self): + """ + The language of the stream. + + :type: str | None + """ + return self.metadata.get("language") + + @property + def disposition(self): + return Disposition(self.ptr.disposition) + + @property + def type(self): + """ + The type of the stream. 
+ + :type: Literal["audio", "video", "subtitle", "data", "attachment"] + """ + return lib.av_get_media_type_string(self.ptr.codecpar.codec_type) diff --git a/lib/python3.10/site-packages/av/utils.pyx b/lib/python3.10/site-packages/av/utils.pyx new file mode 100644 index 0000000000000000000000000000000000000000..190bbf4d7f7f8bf2fbda6596f4e83b336f980ee2 --- /dev/null +++ b/lib/python3.10/site-packages/av/utils.pyx @@ -0,0 +1,78 @@ +from libc.stdint cimport uint64_t + +from fractions import Fraction + +cimport libav as lib + +from av.error cimport err_check + +# === DICTIONARIES === +# ==================== + +cdef _decode(char *s, encoding, errors): + return (s).decode(encoding, errors) + +cdef bytes _encode(s, encoding, errors): + return s.encode(encoding, errors) + +cdef dict avdict_to_dict(lib.AVDictionary *input, str encoding, str errors): + cdef lib.AVDictionaryEntry *element = NULL + cdef dict output = {} + while True: + element = lib.av_dict_get(input, "", element, lib.AV_DICT_IGNORE_SUFFIX) + if element == NULL: + break + output[_decode(element.key, encoding, errors)] = _decode(element.value, encoding, errors) + return output + + +cdef dict_to_avdict(lib.AVDictionary **dst, dict src, str encoding, str errors): + lib.av_dict_free(dst) + for key, value in src.items(): + err_check( + lib.av_dict_set( + dst, + _encode(key, encoding, errors), + _encode(value, encoding, errors), + 0 + ) + ) + + +# === FRACTIONS === +# ================= + +cdef object avrational_to_fraction(const lib.AVRational *input): + if input.num and input.den: + return Fraction(input.num, input.den) + + +cdef void to_avrational(object frac, lib.AVRational *input): + input.num = frac.numerator + input.den = frac.denominator + + +# === OTHER === +# ============= + + +cdef check_ndarray(object array, object dtype, int ndim): + """ + Check a numpy array has the expected data type and number of dimensions. + """ + if array.dtype != dtype: + raise ValueError(f"Expected numpy array with dtype `{dtype}` but got `{array.dtype}`") + if array.ndim != ndim: + raise ValueError(f"Expected numpy array with ndim `{ndim}` but got `{array.ndim}`") + + +cdef flag_in_bitfield(uint64_t bitfield, uint64_t flag): + # Not every flag exists in every version of FFMpeg, so we define them to 0. + if not flag: + return None + return bool(bitfield & flag) + + +# === BACKWARDS COMPAT === + +from .error import err_check diff --git a/lib/python3.10/site-packages/multiprocess/__init__.py b/lib/python3.10/site-packages/multiprocess/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..20c8f7a463266115b2febd0edee6112e8ee91b39 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/__init__.py @@ -0,0 +1,39 @@ +# +# Package analogous to 'threading.py' but using processes +# +# multiprocessing/__init__.py +# +# This package is intended to duplicate the functionality (and much of +# the API) of threading.py but uses processes instead of threads. A +# subpackage 'multiprocessing.dummy' has the same API but is a simple +# wrapper for 'threading'. +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import sys +from . import context + +__version__ = '0.70.12.2' + +# +# Copy stuff from default context +# + +__all__ = [x for x in dir(context._default_context) if not x.startswith('_')] +globals().update((name, getattr(context._default_context, name)) for name in __all__) + +# +# XXX These should not really be documented or public. 
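+# (SUBDEBUG and SUBWARNING are the custom logging levels used by
+# multiprocess.util: SUBDEBUG sits below logging.DEBUG (10) and SUBWARNING
+# between logging.INFO (20) and logging.WARNING (30).)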
+# + +SUBDEBUG = 5 +SUBWARNING = 25 + +# +# Alias for main module -- will be reset by bootstrapping child processes +# + +if '__main__' in sys.modules: + sys.modules['__mp_main__'] = sys.modules['__main__'] diff --git a/lib/python3.10/site-packages/multiprocess/connection.py b/lib/python3.10/site-packages/multiprocess/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..235c48e7402d28db4c3f456cd8748b29401db7e7 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/connection.py @@ -0,0 +1,981 @@ +# +# A higher level module for using sockets (or Windows named pipes) +# +# multiprocessing/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] + +import io +import os +import sys +import socket +import struct +import time +import tempfile +import itertools + +try: + import _multiprocess as _multiprocessing +except ImportError: + import _multiprocessing + +from . import util + +from . import AuthenticationError, BufferTooShort +from .context import reduction +_ForkingPickler = reduction.ForkingPickler + +try: + import _winapi + from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE +except ImportError: + if sys.platform == 'win32': + raise + _winapi = None + +# +# +# + +BUFSIZE = 8192 +# A very generous timeout when it comes to local connections... +CONNECTION_TIMEOUT = 20. + +_mmap_counter = itertools.count() + +default_family = 'AF_INET' +families = ['AF_INET'] + +if hasattr(socket, 'AF_UNIX'): + default_family = 'AF_UNIX' + families += ['AF_UNIX'] + +if sys.platform == 'win32': + default_family = 'AF_PIPE' + families += ['AF_PIPE'] + + +def _init_timeout(timeout=CONNECTION_TIMEOUT): + return getattr(time,'monotonic',time.time)() + timeout + +def _check_timeout(t): + return getattr(time,'monotonic',time.time)() > t + +# +# +# + +def arbitrary_address(family): + ''' + Return an arbitrary free address for the given family + ''' + if family == 'AF_INET': + return ('localhost', 0) + elif family == 'AF_UNIX': + # Prefer abstract sockets if possible to avoid problems with the address + # size. When coding portable applications, some implementations have + # sun_path as short as 92 bytes in the sockaddr_un struct. + if util.abstract_sockets_supported: + return f"\0listener-{os.getpid()}-{next(_mmap_counter)}" + return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) + elif family == 'AF_PIPE': + return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % + (os.getpid(), next(_mmap_counter)), dir="") + else: + raise ValueError('unrecognized family') + +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' 
% family) + +def address_type(address): + ''' + Return the types of the address + + This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' + ''' + if type(address) == tuple: + return 'AF_INET' + elif type(address) is str and address.startswith('\\\\'): + return 'AF_PIPE' + elif type(address) is str or util.is_abstract_socket_namespace(address): + return 'AF_UNIX' + else: + raise ValueError('address type of %r unrecognized' % address) + +# +# Connection classes +# + +class _ConnectionBase: + _handle = None + + def __init__(self, handle, readable=True, writable=True): + handle = handle.__index__() + if handle < 0: + raise ValueError("invalid handle") + if not readable and not writable: + raise ValueError( + "at least one of `readable` and `writable` must be True") + self._handle = handle + self._readable = readable + self._writable = writable + + # XXX should we use util.Finalize instead of a __del__? + + def __del__(self): + if self._handle is not None: + self._close() + + def _check_closed(self): + if self._handle is None: + raise OSError("handle is closed") + + def _check_readable(self): + if not self._readable: + raise OSError("connection is write-only") + + def _check_writable(self): + if not self._writable: + raise OSError("connection is read-only") + + def _bad_message_length(self): + if self._writable: + self._readable = False + else: + self.close() + raise OSError("bad message length") + + @property + def closed(self): + """True if the connection is closed""" + return self._handle is None + + @property + def readable(self): + """True if the connection is readable""" + return self._readable + + @property + def writable(self): + """True if the connection is writable""" + return self._writable + + def fileno(self): + """File descriptor or handle of the connection""" + self._check_closed() + return self._handle + + def close(self): + """Close the connection""" + if self._handle is not None: + try: + self._close() + finally: + self._handle = None + + def send_bytes(self, buf, offset=0, size=None): + """Send the bytes data from a bytes-like object""" + self._check_closed() + self._check_writable() + m = memoryview(buf) + # HACK for byte-indexing of non-bytewise buffers (e.g. array.array) + if m.itemsize > 1: + m = memoryview(bytes(m)) + n = len(m) + if offset < 0: + raise ValueError("offset is negative") + if n < offset: + raise ValueError("buffer length < offset") + if size is None: + size = n - offset + elif size < 0: + raise ValueError("size is negative") + elif offset + size > n: + raise ValueError("buffer length < offset + size") + self._send_bytes(m[offset:offset + size]) + + def send(self, obj): + """Send a (picklable) object""" + self._check_closed() + self._check_writable() + self._send_bytes(_ForkingPickler.dumps(obj)) + + def recv_bytes(self, maxlength=None): + """ + Receive bytes data as a bytes object. + """ + self._check_closed() + self._check_readable() + if maxlength is not None and maxlength < 0: + raise ValueError("negative maxlength") + buf = self._recv_bytes(maxlength) + if buf is None: + self._bad_message_length() + return buf.getvalue() + + def recv_bytes_into(self, buf, offset=0): + """ + Receive bytes data into a writeable bytes-like object. + Return the number of bytes read. 
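+
+        A hedged sketch of the return value, assuming ``a, b = Pipe()``::
+
+            >>> a.send_bytes(b"data")
+            >>> buf = bytearray(16)
+            >>> b.recv_bytes_into(buf)
+            4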
+ """ + self._check_closed() + self._check_readable() + with memoryview(buf) as m: + # Get bytesize of arbitrary buffer + itemsize = m.itemsize + bytesize = itemsize * len(m) + if offset < 0: + raise ValueError("negative offset") + elif offset > bytesize: + raise ValueError("offset too large") + result = self._recv_bytes() + size = result.tell() + if bytesize < offset + size: + raise BufferTooShort(result.getvalue()) + # Message can fit in dest + result.seek(0) + result.readinto(m[offset // itemsize : + (offset + size) // itemsize]) + return size + + def recv(self): + """Receive a (picklable) object""" + self._check_closed() + self._check_readable() + buf = self._recv_bytes() + return _ForkingPickler.loads(buf.getbuffer()) + + def poll(self, timeout=0.0): + """Whether there is any input available to be read""" + self._check_closed() + self._check_readable() + return self._poll(timeout) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +if _winapi: + + class PipeConnection(_ConnectionBase): + """ + Connection class based on a Windows named pipe. + Overlapped I/O is used, so the handles must have been created + with FILE_FLAG_OVERLAPPED. + """ + _got_empty_message = False + + def _close(self, _CloseHandle=_winapi.CloseHandle): + _CloseHandle(self._handle) + + def _send_bytes(self, buf): + ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nwritten, err = ov.GetOverlappedResult(True) + assert err == 0 + assert nwritten == len(buf) + + def _recv_bytes(self, maxsize=None): + if self._got_empty_message: + self._got_empty_message = False + return io.BytesIO() + else: + bsize = 128 if maxsize is None else min(maxsize, 128) + try: + ov, err = _winapi.ReadFile(self._handle, bsize, + overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nread, err = ov.GetOverlappedResult(True) + if err == 0: + f = io.BytesIO() + f.write(ov.getbuffer()) + return f + elif err == _winapi.ERROR_MORE_DATA: + return self._get_more_data(ov, maxsize) + except OSError as e: + if e.winerror == _winapi.ERROR_BROKEN_PIPE: + raise EOFError + else: + raise + raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") + + def _poll(self, timeout): + if (self._got_empty_message or + _winapi.PeekNamedPipe(self._handle)[0] != 0): + return True + return bool(wait([self], timeout)) + + def _get_more_data(self, ov, maxsize): + buf = ov.getbuffer() + f = io.BytesIO() + f.write(buf) + left = _winapi.PeekNamedPipe(self._handle)[1] + assert left > 0 + if maxsize is not None and len(buf) + left > maxsize: + self._bad_message_length() + ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) + rbytes, err = ov.GetOverlappedResult(True) + assert err == 0 + assert rbytes == left + f.write(ov.getbuffer()) + return f + + +class Connection(_ConnectionBase): + """ + Connection class based on an arbitrary file descriptor (Unix only), or + a socket handle (Windows). 
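+
+    A minimal, illustrative sketch on POSIX (in practice ``Pipe()`` below
+    hands back ready-made pairs)::
+
+        >>> import os
+        >>> r, w = os.pipe()
+        >>> reader = Connection(r, writable=False)
+        >>> writer = Connection(w, readable=False)
+        >>> writer.send("hi")
+        >>> reader.recv()
+        'hi'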
+ """ + + if _winapi: + def _close(self, _close=_multiprocessing.closesocket): + _close(self._handle) + _write = _multiprocessing.send + _read = _multiprocessing.recv + else: + def _close(self, _close=os.close): + _close(self._handle) + _write = os.write + _read = os.read + + def _send(self, buf, write=_write): + remaining = len(buf) + while True: + n = write(self._handle, buf) + remaining -= n + if remaining == 0: + break + buf = buf[n:] + + def _recv(self, size, read=_read): + buf = io.BytesIO() + handle = self._handle + remaining = size + while remaining > 0: + chunk = read(handle, remaining) + n = len(chunk) + if n == 0: + if remaining == size: + raise EOFError + else: + raise OSError("got end of file during message") + buf.write(chunk) + remaining -= n + return buf + + def _send_bytes(self, buf): + n = len(buf) + if n > 0x7fffffff: + pre_header = struct.pack("!i", -1) + header = struct.pack("!Q", n) + self._send(pre_header) + self._send(header) + self._send(buf) + else: + # For wire compatibility with 3.7 and lower + header = struct.pack("!i", n) + if n > 16384: + # The payload is large so Nagle's algorithm won't be triggered + # and we'd better avoid the cost of concatenation. + self._send(header) + self._send(buf) + else: + # Issue #20540: concatenate before sending, to avoid delays due + # to Nagle's algorithm on a TCP socket. + # Also note we want to avoid sending a 0-length buffer separately, + # to avoid "broken pipe" errors if the other end closed the pipe. + self._send(header + buf) + + def _recv_bytes(self, maxsize=None): + buf = self._recv(4) + size, = struct.unpack("!i", buf.getvalue()) + if size == -1: + buf = self._recv(8) + size, = struct.unpack("!Q", buf.getvalue()) + if maxsize is not None and size > maxsize: + return None + return self._recv(size) + + def _poll(self, timeout): + r = wait([self], timeout) + return bool(r) + + +# +# Public functions +# + +class Listener(object): + ''' + Returns a listener object. + + This is a wrapper for a bound socket which is 'listening' for + connections, or for a Windows named pipe. + ''' + def __init__(self, address=None, family=None, backlog=1, authkey=None): + family = family or (address and address_type(address)) \ + or default_family + address = address or arbitrary_address(family) + + _validate_family(family) + if family == 'AF_PIPE': + self._listener = PipeListener(address, backlog) + else: + self._listener = SocketListener(address, family, backlog) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + self._authkey = authkey + + def accept(self): + ''' + Accept a connection on the bound socket or named pipe of `self`. + + Returns a `Connection` object. + ''' + if self._listener is None: + raise OSError('listener is closed') + c = self._listener.accept() + if self._authkey: + deliver_challenge(c, self._authkey) + answer_challenge(c, self._authkey) + return c + + def close(self): + ''' + Close the bound socket or named pipe of `self`. 
+ ''' + listener = self._listener + if listener is not None: + self._listener = None + listener.close() + + @property + def address(self): + return self._listener._address + + @property + def last_accepted(self): + return self._listener._last_accepted + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address, family=None, authkey=None): + ''' + Returns a connection to the address of a `Listener` + ''' + family = family or address_type(address) + _validate_family(family) + if family == 'AF_PIPE': + c = PipeClient(address) + else: + c = SocketClient(address) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + if authkey is not None: + answer_challenge(c, authkey) + deliver_challenge(c, authkey) + + return c + + +if sys.platform != 'win32': + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + if duplex: + s1, s2 = socket.socketpair() + s1.setblocking(True) + s2.setblocking(True) + c1 = Connection(s1.detach()) + c2 = Connection(s2.detach()) + else: + fd1, fd2 = os.pipe() + c1 = Connection(fd1, writable=False) + c2 = Connection(fd2, readable=False) + + return c1, c2 + +else: + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + address = arbitrary_address('AF_PIPE') + if duplex: + openmode = _winapi.PIPE_ACCESS_DUPLEX + access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE + obsize, ibsize = BUFSIZE, BUFSIZE + else: + openmode = _winapi.PIPE_ACCESS_INBOUND + access = _winapi.GENERIC_WRITE + obsize, ibsize = 0, BUFSIZE + + h1 = _winapi.CreateNamedPipe( + address, openmode | _winapi.FILE_FLAG_OVERLAPPED | + _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, + # default security descriptor: the handle cannot be inherited + _winapi.NULL + ) + h2 = _winapi.CreateFile( + address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + _winapi.SetNamedPipeHandleState( + h2, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + + overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) + _, err = overlapped.GetOverlappedResult(True) + assert err == 0 + + c1 = PipeConnection(h1, writable=duplex) + c2 = PipeConnection(h2, readable=duplex) + + return c1, c2 + +# +# Definitions for connections based on sockets +# + +class SocketListener(object): + ''' + Representation of a socket which is bound to an address and listening + ''' + def __init__(self, address, family, backlog=1): + self._socket = socket.socket(getattr(socket, family)) + try: + # SO_REUSEADDR has different semantics on Windows (issue #2550). 
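+            # (On Windows, SO_REUSEADDR would let another socket bind to the
+            # same address while this one is listening, so only set it on POSIX.)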
+ if os.name == 'posix': + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + self._socket.setblocking(True) + self._socket.bind(address) + self._socket.listen(backlog) + self._address = self._socket.getsockname() + except OSError: + self._socket.close() + raise + self._family = family + self._last_accepted = None + + if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): + # Linux abstract socket namespaces do not need to be explicitly unlinked + self._unlink = util.Finalize( + self, os.unlink, args=(address,), exitpriority=0 + ) + else: + self._unlink = None + + def accept(self): + s, self._last_accepted = self._socket.accept() + s.setblocking(True) + return Connection(s.detach()) + + def close(self): + try: + self._socket.close() + finally: + unlink = self._unlink + if unlink is not None: + self._unlink = None + unlink() + + +def SocketClient(address): + ''' + Return a connection object connected to the socket given by `address` + ''' + family = address_type(address) + with socket.socket( getattr(socket, family) ) as s: + s.setblocking(True) + s.connect(address) + return Connection(s.detach()) + +# +# Definitions for connections based on named pipes +# + +if sys.platform == 'win32': + + class PipeListener(object): + ''' + Representation of a named pipe + ''' + def __init__(self, address, backlog=None): + self._address = address + self._handle_queue = [self._new_handle(first=True)] + + self._last_accepted = None + util.sub_debug('listener created with address=%r', self._address) + self.close = util.Finalize( + self, PipeListener._finalize_pipe_listener, + args=(self._handle_queue, self._address), exitpriority=0 + ) + + def _new_handle(self, first=False): + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + return _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL + ) + + def accept(self): + self._handle_queue.append(self._new_handle()) + handle = self._handle_queue.pop(0) + try: + ov = _winapi.ConnectNamedPipe(handle, overlapped=True) + except OSError as e: + if e.winerror != _winapi.ERROR_NO_DATA: + raise + # ERROR_NO_DATA can occur if a client has already connected, + # written data and then disconnected -- see Issue 14725. 
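+                # In that case the handle is already connected, so fall
+                # through and return it wrapped in a PipeConnection below.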
+ else: + try: + res = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + except: + ov.cancel() + _winapi.CloseHandle(handle) + raise + finally: + _, err = ov.GetOverlappedResult(True) + assert err == 0 + return PipeConnection(handle) + + @staticmethod + def _finalize_pipe_listener(queue, address): + util.sub_debug('closing listener with address=%r', address) + for handle in queue: + _winapi.CloseHandle(handle) + + def PipeClient(address): + ''' + Return a connection object connected to the pipe given by `address` + ''' + t = _init_timeout() + while 1: + try: + _winapi.WaitNamedPipe(address, 1000) + h = _winapi.CreateFile( + address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, + 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + except OSError as e: + if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): + raise + else: + break + else: + raise + + _winapi.SetNamedPipeHandleState( + h, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + return PipeConnection(h) + +# +# Authentication stuff +# + +MESSAGE_LENGTH = 20 + +CHALLENGE = b'#CHALLENGE#' +WELCOME = b'#WELCOME#' +FAILURE = b'#FAILURE#' + +def deliver_challenge(connection, authkey): + import hmac + if not isinstance(authkey, bytes): + raise ValueError( + "Authkey must be bytes, not {0!s}".format(type(authkey))) + message = os.urandom(MESSAGE_LENGTH) + connection.send_bytes(CHALLENGE + message) + digest = hmac.new(authkey, message, 'md5').digest() + response = connection.recv_bytes(256) # reject large message + if response == digest: + connection.send_bytes(WELCOME) + else: + connection.send_bytes(FAILURE) + raise AuthenticationError('digest received was wrong') + +def answer_challenge(connection, authkey): + import hmac + if not isinstance(authkey, bytes): + raise ValueError( + "Authkey must be bytes, not {0!s}".format(type(authkey))) + message = connection.recv_bytes(256) # reject large message + assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message + message = message[len(CHALLENGE):] + digest = hmac.new(authkey, message, 'md5').digest() + connection.send_bytes(digest) + response = connection.recv_bytes(256) # reject large message + if response != WELCOME: + raise AuthenticationError('digest sent was rejected') + +# +# Support for using xmlrpclib for serialization +# + +class ConnectionWrapper(object): + def __init__(self, conn, dumps, loads): + self._conn = conn + self._dumps = dumps + self._loads = loads + for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): + obj = getattr(conn, attr) + setattr(self, attr, obj) + def send(self, obj): + s = self._dumps(obj) + self._conn.send_bytes(s) + def recv(self): + s = self._conn.recv_bytes() + return self._loads(s) + +def _xml_dumps(obj): + return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') + +def _xml_loads(s): + (obj,), method = xmlrpclib.loads(s.decode('utf-8')) + return obj + +class XmlListener(Listener): + def accept(self): + global xmlrpclib + import xmlrpc.client as xmlrpclib + obj = Listener.accept(self) + return ConnectionWrapper(obj, _xml_dumps, _xml_loads) + +def XmlClient(*args, **kwds): + global xmlrpclib + import xmlrpc.client as xmlrpclib + return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) + +# +# Wait +# + +if sys.platform == 'win32': + + def _exhaustive_wait(handles, timeout): + # Return ALL handles which are currently signalled. (Only + # returning the first signalled might create starvation issues.) 
+ L = list(handles) + ready = [] + while L: + res = _winapi.WaitForMultipleObjects(L, False, timeout) + if res == WAIT_TIMEOUT: + break + elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): + res -= WAIT_OBJECT_0 + elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): + res -= WAIT_ABANDONED_0 + else: + raise RuntimeError('Should not get here') + ready.append(L[res]) + L = L[res+1:] + timeout = 0 + return ready + + _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. + ''' + if timeout is None: + timeout = INFINITE + elif timeout < 0: + timeout = 0 + else: + timeout = int(timeout * 1000 + 0.5) + + object_list = list(object_list) + waithandle_to_obj = {} + ov_list = [] + ready_objects = set() + ready_handles = set() + + try: + for o in object_list: + try: + fileno = getattr(o, 'fileno') + except AttributeError: + waithandle_to_obj[o.__index__()] = o + else: + # start an overlapped read of length zero + try: + ov, err = _winapi.ReadFile(fileno(), 0, True) + except OSError as e: + ov, err = None, e.winerror + if err not in _ready_errors: + raise + if err == _winapi.ERROR_IO_PENDING: + ov_list.append(ov) + waithandle_to_obj[ov.event] = o + else: + # If o.fileno() is an overlapped pipe handle and + # err == 0 then there is a zero length message + # in the pipe, but it HAS NOT been consumed... + if ov and sys.getwindowsversion()[:2] >= (6, 2): + # ... except on Windows 8 and later, where + # the message HAS been consumed. + try: + _, err = ov.GetOverlappedResult(False) + except OSError as e: + err = e.winerror + if not err and hasattr(o, '_got_empty_message'): + o._got_empty_message = True + ready_objects.add(o) + timeout = 0 + + ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) + finally: + # request that overlapped reads stop + for ov in ov_list: + ov.cancel() + + # wait for all overlapped reads to stop + for ov in ov_list: + try: + _, err = ov.GetOverlappedResult(True) + except OSError as e: + err = e.winerror + if err not in _ready_errors: + raise + if err != _winapi.ERROR_OPERATION_ABORTED: + o = waithandle_to_obj[ov.event] + ready_objects.add(o) + if err == 0: + # If o.fileno() is an overlapped pipe handle then + # a zero length message HAS been consumed. + if hasattr(o, '_got_empty_message'): + o._got_empty_message = True + + ready_objects.update(waithandle_to_obj[h] for h in ready_handles) + return [o for o in object_list if o in ready_objects] + +else: + + import selectors + + # poll/select have the advantage of not requiring any extra file + # descriptor, contrarily to epoll/kqueue (also, they require a single + # syscall). + if hasattr(selectors, 'PollSelector'): + _WaitSelector = selectors.PollSelector + else: + _WaitSelector = selectors.SelectSelector + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. 
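+
+        A hedged sketch using the read end of a simplex pipe::
+
+            >>> r, w = Pipe(duplex=False)
+            >>> wait([r], timeout=0.1)
+            []
+            >>> w.send(None)
+            >>> wait([r], timeout=0.1) == [r]
+            True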
+ ''' + with _WaitSelector() as selector: + for obj in object_list: + selector.register(obj, selectors.EVENT_READ) + + if timeout is not None: + deadline = getattr(time,'monotonic',time.time)() + timeout + + while True: + ready = selector.select(timeout) + if ready: + return [key.fileobj for (key, events) in ready] + else: + if timeout is not None: + timeout = deadline - getattr(time,'monotonic',time.time)() + if timeout < 0: + return ready + +# +# Make connection and socket objects sharable if possible +# + +if sys.platform == 'win32': + def reduce_connection(conn): + handle = conn.fileno() + with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: + from . import resource_sharer + ds = resource_sharer.DupSocket(s) + return rebuild_connection, (ds, conn.readable, conn.writable) + def rebuild_connection(ds, readable, writable): + sock = ds.detach() + return Connection(sock.detach(), readable, writable) + reduction.register(Connection, reduce_connection) + + def reduce_pipe_connection(conn): + access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | + (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) + dh = reduction.DupHandle(conn.fileno(), access) + return rebuild_pipe_connection, (dh, conn.readable, conn.writable) + def rebuild_pipe_connection(dh, readable, writable): + handle = dh.detach() + return PipeConnection(handle, readable, writable) + reduction.register(PipeConnection, reduce_pipe_connection) + +else: + def reduce_connection(conn): + df = reduction.DupFd(conn.fileno()) + return rebuild_connection, (df, conn.readable, conn.writable) + def rebuild_connection(df, readable, writable): + fd = df.detach() + return Connection(fd, readable, writable) + reduction.register(Connection, reduce_connection) diff --git a/lib/python3.10/site-packages/multiprocess/context.py b/lib/python3.10/site-packages/multiprocess/context.py new file mode 100644 index 0000000000000000000000000000000000000000..1208d205ee174a6a3c8f62710961813e2fb5f2df --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/context.py @@ -0,0 +1,362 @@ +import os +import sys +import threading + +from . import process +from . import reduction + +__all__ = () + +# +# Exceptions +# + +class ProcessError(Exception): + pass + +class BufferTooShort(ProcessError): + pass + +class TimeoutError(ProcessError): + pass + +class AuthenticationError(ProcessError): + pass + +# +# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py +# + +class BaseContext(object): + + ProcessError = ProcessError + BufferTooShort = BufferTooShort + TimeoutError = TimeoutError + AuthenticationError = AuthenticationError + + current_process = staticmethod(process.current_process) + parent_process = staticmethod(process.parent_process) + active_children = staticmethod(process.active_children) + + def cpu_count(self): + '''Returns the number of CPUs in the system''' + num = os.cpu_count() + if num is None: + raise NotImplementedError('cannot determine number of cpus') + else: + return num + + def Manager(self): + '''Returns a manager associated with a running server process + + The managers methods such as `Lock()`, `Condition()` and `Queue()` + can be used to create shared objects. 
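+
+        A hedged usage sketch (the manager starts a server process and shuts
+        it down again on exit)::
+
+            >>> import multiprocess as mp
+            >>> with mp.Manager() as m:
+            ...     d = m.dict()
+            ...     d["answer"] = 42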
+ ''' + from .managers import SyncManager + m = SyncManager(ctx=self.get_context()) + m.start() + return m + + def Pipe(self, duplex=True): + '''Returns two connection object connected by a pipe''' + from .connection import Pipe + return Pipe(duplex) + + def Lock(self): + '''Returns a non-recursive lock object''' + from .synchronize import Lock + return Lock(ctx=self.get_context()) + + def RLock(self): + '''Returns a recursive lock object''' + from .synchronize import RLock + return RLock(ctx=self.get_context()) + + def Condition(self, lock=None): + '''Returns a condition object''' + from .synchronize import Condition + return Condition(lock, ctx=self.get_context()) + + def Semaphore(self, value=1): + '''Returns a semaphore object''' + from .synchronize import Semaphore + return Semaphore(value, ctx=self.get_context()) + + def BoundedSemaphore(self, value=1): + '''Returns a bounded semaphore object''' + from .synchronize import BoundedSemaphore + return BoundedSemaphore(value, ctx=self.get_context()) + + def Event(self): + '''Returns an event object''' + from .synchronize import Event + return Event(ctx=self.get_context()) + + def Barrier(self, parties, action=None, timeout=None): + '''Returns a barrier object''' + from .synchronize import Barrier + return Barrier(parties, action, timeout, ctx=self.get_context()) + + def Queue(self, maxsize=0): + '''Returns a queue object''' + from .queues import Queue + return Queue(maxsize, ctx=self.get_context()) + + def JoinableQueue(self, maxsize=0): + '''Returns a queue object''' + from .queues import JoinableQueue + return JoinableQueue(maxsize, ctx=self.get_context()) + + def SimpleQueue(self): + '''Returns a queue object''' + from .queues import SimpleQueue + return SimpleQueue(ctx=self.get_context()) + + def Pool(self, processes=None, initializer=None, initargs=(), + maxtasksperchild=None): + '''Returns a process pool object''' + from .pool import Pool + return Pool(processes, initializer, initargs, maxtasksperchild, + context=self.get_context()) + + def RawValue(self, typecode_or_type, *args): + '''Returns a shared object''' + from .sharedctypes import RawValue + return RawValue(typecode_or_type, *args) + + def RawArray(self, typecode_or_type, size_or_initializer): + '''Returns a shared array''' + from .sharedctypes import RawArray + return RawArray(typecode_or_type, size_or_initializer) + + def Value(self, typecode_or_type, *args, lock=True): + '''Returns a synchronized shared object''' + from .sharedctypes import Value + return Value(typecode_or_type, *args, lock=lock, + ctx=self.get_context()) + + def Array(self, typecode_or_type, size_or_initializer, *, lock=True): + '''Returns a synchronized shared array''' + from .sharedctypes import Array + return Array(typecode_or_type, size_or_initializer, lock=lock, + ctx=self.get_context()) + + def freeze_support(self): + '''Check whether this is a fake forked process in a frozen executable. + If so then run code specified by commandline and exit. + ''' + if sys.platform == 'win32' and getattr(sys, 'frozen', False): + from .spawn import freeze_support + freeze_support() + + def get_logger(self): + '''Return package logger -- if it does not already exist then + it is created. 
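+
+        A hedged sketch (``20`` is ``logging.INFO``)::
+
+            >>> import multiprocess as mp
+            >>> mp.get_logger().setLevel(20)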
+ ''' + from .util import get_logger + return get_logger() + + def log_to_stderr(self, level=None): + '''Turn on logging and add a handler which prints to stderr''' + from .util import log_to_stderr + return log_to_stderr(level) + + def allow_connection_pickling(self): + '''Install support for sending connections and sockets + between processes + ''' + # This is undocumented. In previous versions of multiprocessing + # its only effect was to make socket objects inheritable on Windows. + from . import connection + + def set_executable(self, executable): + '''Sets the path to a python.exe or pythonw.exe binary used to run + child processes instead of sys.executable when using the 'spawn' + start method. Useful for people embedding Python. + ''' + from .spawn import set_executable + set_executable(executable) + + def set_forkserver_preload(self, module_names): + '''Set list of module names to try to load in forkserver process. + This is really just a hint. + ''' + from .forkserver import set_forkserver_preload + set_forkserver_preload(module_names) + + def get_context(self, method=None): + if method is None: + return self + try: + ctx = _concrete_contexts[method] + except KeyError: + raise ValueError('cannot find context for %r' % method) from None + ctx._check_available() + return ctx + + def get_start_method(self, allow_none=False): + return self._name + + def set_start_method(self, method, force=False): + raise ValueError('cannot set start method of concrete context') + + @property + def reducer(self): + '''Controls how objects will be reduced to a form that can be + shared with other processes.''' + return globals().get('reduction') + + @reducer.setter + def reducer(self, reduction): + globals()['reduction'] = reduction + + def _check_available(self): + pass + +# +# Type of default context -- underlying context can be set at most once +# + +class Process(process.BaseProcess): + _start_method = None + @staticmethod + def _Popen(process_obj): + return _default_context.get_context().Process._Popen(process_obj) + +class DefaultContext(BaseContext): + Process = Process + + def __init__(self, context): + self._default_context = context + self._actual_context = None + + def get_context(self, method=None): + if method is None: + if self._actual_context is None: + self._actual_context = self._default_context + return self._actual_context + else: + return super().get_context(method) + + def set_start_method(self, method, force=False): + if self._actual_context is not None and not force: + raise RuntimeError('context has already been set') + if method is None and force: + self._actual_context = None + return + self._actual_context = self.get_context(method) + + def get_start_method(self, allow_none=False): + if self._actual_context is None: + if allow_none: + return None + self._actual_context = self._default_context + return self._actual_context._name + + def get_all_start_methods(self): + if sys.platform == 'win32': + return ['spawn'] + else: + methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] + if reduction.HAVE_SEND_HANDLE: + methods.append('forkserver') + return methods + + +# +# Context types for fixed start method +# + +if sys.platform != 'win32': + + class ForkProcess(process.BaseProcess): + _start_method = 'fork' + @staticmethod + def _Popen(process_obj): + from .popen_fork import Popen + return Popen(process_obj) + + class SpawnProcess(process.BaseProcess): + _start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_posix import 
Popen + return Popen(process_obj) + + class ForkServerProcess(process.BaseProcess): + _start_method = 'forkserver' + @staticmethod + def _Popen(process_obj): + from .popen_forkserver import Popen + return Popen(process_obj) + + class ForkContext(BaseContext): + _name = 'fork' + Process = ForkProcess + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + class ForkServerContext(BaseContext): + _name = 'forkserver' + Process = ForkServerProcess + def _check_available(self): + if not reduction.HAVE_SEND_HANDLE: + raise ValueError('forkserver start method not available') + + _concrete_contexts = { + 'fork': ForkContext(), + 'spawn': SpawnContext(), + 'forkserver': ForkServerContext(), + } + if sys.platform == 'darwin': + # bpo-33725: running arbitrary code after fork() is no longer reliable + # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. + _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn + else: + _default_context = DefaultContext(_concrete_contexts['fork']) + +else: + + class SpawnProcess(process.BaseProcess): + _start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_win32 import Popen + return Popen(process_obj) + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + _concrete_contexts = { + 'spawn': SpawnContext(), + } + _default_context = DefaultContext(_concrete_contexts['spawn']) + +# +# Force the start method +# + +def _force_start_method(method): + _default_context._actual_context = _concrete_contexts[method] + +# +# Check that the current thread is spawning a child process +# + +_tls = threading.local() + +def get_spawning_popen(): + return getattr(_tls, 'spawning_popen', None) + +def set_spawning_popen(popen): + _tls.spawning_popen = popen + +def assert_spawning(obj): + if get_spawning_popen() is None: + raise RuntimeError( + '%s objects should only be shared between processes' + ' through inheritance' % type(obj).__name__ + ) diff --git a/lib/python3.10/site-packages/multiprocess/dummy/__init__.py b/lib/python3.10/site-packages/multiprocess/dummy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1468609e347b3a0b9281e5c9e6ec311fcb37e5 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/dummy/__init__.py @@ -0,0 +1,126 @@ +# +# Support for the API of the multiprocessing package using threads +# +# multiprocessing/dummy/__init__.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
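+# A hedged usage sketch of this thread-backed drop-in (names as re-exported
+# below):
+#
+#     >>> from multiprocess import dummy
+#     >>> with dummy.Pool(2) as pool:
+#     ...     pool.map(len, ["ab", "abc"])
+#     [2, 3]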
+# + +__all__ = [ + 'Process', 'current_process', 'active_children', 'freeze_support', + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', + 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' + ] + +# +# Imports +# + +import threading +import sys +import weakref +import array + +from .connection import Pipe +from threading import Lock, RLock, Semaphore, BoundedSemaphore +from threading import Event, Condition, Barrier +from queue import Queue + +# +# +# + +class DummyProcess(threading.Thread): + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): + threading.Thread.__init__(self, group, target, name, args, kwargs) + self._pid = None + self._children = weakref.WeakKeyDictionary() + self._start_called = False + self._parent = current_process() + + def start(self): + if self._parent is not current_process(): + raise RuntimeError( + "Parent is {0!r} but current_process is {1!r}".format( + self._parent, current_process())) + self._start_called = True + if hasattr(self._parent, '_children'): + self._parent._children[self] = None + threading.Thread.start(self) + + @property + def exitcode(self): + if self._start_called and not self.is_alive(): + return 0 + else: + return None + +# +# +# + +Process = DummyProcess +current_process = threading.current_thread +current_process()._children = weakref.WeakKeyDictionary() + +def active_children(): + children = current_process()._children + for p in list(children): + if not p.is_alive(): + children.pop(p, None) + return list(children) + +def freeze_support(): + pass + +# +# +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +dict = dict +list = list + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + + @property + def value(self): + return self._value + + @value.setter + def value(self, value): + self._value = value + + def __repr__(self): + return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) + +def Manager(): + return sys.modules[__name__] + +def shutdown(): + pass + +def Pool(processes=None, initializer=None, initargs=()): + from ..pool import ThreadPool + return ThreadPool(processes, initializer, initargs) + +JoinableQueue = Queue diff --git a/lib/python3.10/site-packages/multiprocess/dummy/connection.py b/lib/python3.10/site-packages/multiprocess/dummy/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ce320fcf514083f3a6477e87abf40e9719285a --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/dummy/connection.py @@ -0,0 +1,75 @@ +# +# Analogue of `multiprocessing.connection` which uses queues instead of sockets +# +# multiprocessing/dummy/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
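+# A hedged sketch of the queue-backed objects defined below:
+#
+#     >>> a, b = Pipe()
+#     >>> a.send("ping")
+#     >>> b.recv()
+#     'ping'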
+# + +__all__ = [ 'Client', 'Listener', 'Pipe' ] + +from queue import Queue + + +families = [None] + + +class Listener(object): + + def __init__(self, address=None, family=None, backlog=1): + self._backlog_queue = Queue(backlog) + + def accept(self): + return Connection(*self._backlog_queue.get()) + + def close(self): + self._backlog_queue = None + + @property + def address(self): + return self._backlog_queue + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address): + _in, _out = Queue(), Queue() + address.put((_out, _in)) + return Connection(_in, _out) + + +def Pipe(duplex=True): + a, b = Queue(), Queue() + return Connection(a, b), Connection(b, a) + + +class Connection(object): + + def __init__(self, _in, _out): + self._out = _out + self._in = _in + self.send = self.send_bytes = _out.put + self.recv = self.recv_bytes = _in.get + + def poll(self, timeout=0.0): + if self._in.qsize() > 0: + return True + if timeout <= 0.0: + return False + with self._in.not_empty: + self._in.not_empty.wait(timeout) + return self._in.qsize() > 0 + + def close(self): + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() diff --git a/lib/python3.10/site-packages/multiprocess/forkserver.py b/lib/python3.10/site-packages/multiprocess/forkserver.py new file mode 100644 index 0000000000000000000000000000000000000000..f988506e1a6f30f04d5680f62bf9ae965f993254 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/forkserver.py @@ -0,0 +1,347 @@ +import errno +import os +import selectors +import signal +import socket +import struct +import sys +import threading +import warnings + +from . import connection +from . import process +from .context import reduction +from . import resource_tracker +from . import spawn +from . import util + +__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', + 'set_forkserver_preload'] + +# +# +# + +MAXFDS_TO_SEND = 256 +SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t + +# +# Forkserver class +# + +class ForkServer(object): + + def __init__(self): + self._forkserver_address = None + self._forkserver_alive_fd = None + self._forkserver_pid = None + self._inherited_fds = None + self._lock = threading.Lock() + self._preload_modules = ['__main__'] + + def _stop(self): + # Method used by unit tests to stop the server + with self._lock: + self._stop_unlocked() + + def _stop_unlocked(self): + if self._forkserver_pid is None: + return + + # close the "alive" file descriptor asks the server to stop + os.close(self._forkserver_alive_fd) + self._forkserver_alive_fd = None + + os.waitpid(self._forkserver_pid, 0) + self._forkserver_pid = None + + if not util.is_abstract_socket_namespace(self._forkserver_address): + os.unlink(self._forkserver_address) + self._forkserver_address = None + + def set_forkserver_preload(self, modules_names): + '''Set list of module names to try to load in forkserver process.''' + if not all(type(mod) is str for mod in self._preload_modules): + raise TypeError('module_names must be a list of strings') + self._preload_modules = modules_names + + def get_inherited_fds(self): + '''Return list of fds inherited from parent process. + + This returns None if the current process was not started by fork + server. + ''' + return self._inherited_fds + + def connect_to_new_process(self, fds): + '''Request forkserver to create a child process. + + Returns a pair of fds (status_r, data_w). 
The calling process can read + the child process's pid and (eventually) its returncode from status_r. + The calling process should write to data_w the pickled preparation and + process data. + ''' + self.ensure_running() + if len(fds) + 4 >= MAXFDS_TO_SEND: + raise ValueError('too many fds') + with socket.socket(socket.AF_UNIX) as client: + client.connect(self._forkserver_address) + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + allfds = [child_r, child_w, self._forkserver_alive_fd, + resource_tracker.getfd()] + allfds += fds + try: + reduction.sendfds(client, allfds) + return parent_r, parent_w + except: + os.close(parent_r) + os.close(parent_w) + raise + finally: + os.close(child_r) + os.close(child_w) + + def ensure_running(self): + '''Make sure that a fork server is running. + + This can be called from any process. Note that usually a child + process will just reuse the forkserver started by its parent, so + ensure_running() will do nothing. + ''' + with self._lock: + resource_tracker.ensure_running() + if self._forkserver_pid is not None: + # forkserver was launched before, is it still running? + pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) + if not pid: + # still alive + return + # dead, launch it again + os.close(self._forkserver_alive_fd) + self._forkserver_address = None + self._forkserver_alive_fd = None + self._forkserver_pid = None + + cmd = ('from multiprocess.forkserver import main; ' + + 'main(%d, %d, %r, **%r)') + + if self._preload_modules: + desired_keys = {'main_path', 'sys_path'} + data = spawn.get_preparation_data('ignore') + data = {x: y for x, y in data.items() if x in desired_keys} + else: + data = {} + + with socket.socket(socket.AF_UNIX) as listener: + address = connection.arbitrary_address('AF_UNIX') + listener.bind(address) + if not util.is_abstract_socket_namespace(address): + os.chmod(address, 0o600) + listener.listen() + + # all client processes own the write end of the "alive" pipe; + # when they all terminate the read end becomes ready. 
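+                # (The forkserver's main loop watches alive_r and exits once
+                # every duplicated alive_w has been closed by its clients.)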
+ alive_r, alive_w = os.pipe() + try: + fds_to_pass = [listener.fileno(), alive_r] + cmd %= (listener.fileno(), alive_r, self._preload_modules, + data) + exe = spawn.get_executable() + args = [exe] + util._args_from_interpreter_flags() + args += ['-c', cmd] + pid = util.spawnv_passfds(exe, args, fds_to_pass) + except: + os.close(alive_w) + raise + finally: + os.close(alive_r) + self._forkserver_address = address + self._forkserver_alive_fd = alive_w + self._forkserver_pid = pid + +# +# +# + +def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): + '''Run forkserver.''' + if preload: + if '__main__' in preload and main_path is not None: + process.current_process()._inheriting = True + try: + spawn.import_main_path(main_path) + finally: + del process.current_process()._inheriting + for modname in preload: + try: + __import__(modname) + except ImportError: + pass + + util._close_stdin() + + sig_r, sig_w = os.pipe() + os.set_blocking(sig_r, False) + os.set_blocking(sig_w, False) + + def sigchld_handler(*_unused): + # Dummy signal handler, doesn't do anything + pass + + handlers = { + # unblocking SIGCHLD allows the wakeup fd to notify our event loop + signal.SIGCHLD: sigchld_handler, + # protect the process from ^C + signal.SIGINT: signal.SIG_IGN, + } + old_handlers = {sig: signal.signal(sig, val) + for (sig, val) in handlers.items()} + + # calling os.write() in the Python signal handler is racy + signal.set_wakeup_fd(sig_w) + + # map child pids to client fds + pid_to_fd = {} + + with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ + selectors.DefaultSelector() as selector: + _forkserver._forkserver_address = listener.getsockname() + + selector.register(listener, selectors.EVENT_READ) + selector.register(alive_r, selectors.EVENT_READ) + selector.register(sig_r, selectors.EVENT_READ) + + while True: + try: + while True: + rfds = [key.fileobj for (key, events) in selector.select()] + if rfds: + break + + if alive_r in rfds: + # EOF because no more client processes left + assert os.read(alive_r, 1) == b'', "Not at EOF?" 
+ raise SystemExit + + if sig_r in rfds: + # Got SIGCHLD + os.read(sig_r, 65536) # exhaust + while True: + # Scan for child processes + try: + pid, sts = os.waitpid(-1, os.WNOHANG) + except ChildProcessError: + break + if pid == 0: + break + child_w = pid_to_fd.pop(pid, None) + if child_w is not None: + returncode = os.waitstatus_to_exitcode(sts) + # Send exit code to client process + try: + write_signed(child_w, returncode) + except BrokenPipeError: + # client vanished + pass + os.close(child_w) + else: + # This shouldn't happen really + warnings.warn('forkserver: waitpid returned ' + 'unexpected pid %d' % pid) + + if listener in rfds: + # Incoming fork request + with listener.accept()[0] as s: + # Receive fds from client + fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) + if len(fds) > MAXFDS_TO_SEND: + raise RuntimeError( + "Too many ({0:n}) fds to send".format( + len(fds))) + child_r, child_w, *fds = fds + s.close() + pid = os.fork() + if pid == 0: + # Child + code = 1 + try: + listener.close() + selector.close() + unused_fds = [alive_r, child_w, sig_r, sig_w] + unused_fds.extend(pid_to_fd.values()) + code = _serve_one(child_r, fds, + unused_fds, + old_handlers) + except Exception: + sys.excepthook(*sys.exc_info()) + sys.stderr.flush() + finally: + os._exit(code) + else: + # Send pid to client process + try: + write_signed(child_w, pid) + except BrokenPipeError: + # client vanished + pass + pid_to_fd[pid] = child_w + os.close(child_r) + for fd in fds: + os.close(fd) + + except OSError as e: + if e.errno != errno.ECONNABORTED: + raise + + +def _serve_one(child_r, fds, unused_fds, handlers): + # close unnecessary stuff and reset signal handlers + signal.set_wakeup_fd(-1) + for sig, val in handlers.items(): + signal.signal(sig, val) + for fd in unused_fds: + os.close(fd) + + (_forkserver._forkserver_alive_fd, + resource_tracker._resource_tracker._fd, + *_forkserver._inherited_fds) = fds + + # Run process object received over pipe + parent_sentinel = os.dup(child_r) + code = spawn._main(child_r, parent_sentinel) + + return code + + +# +# Read and write signed numbers +# + +def read_signed(fd): + data = b'' + length = SIGNED_STRUCT.size + while len(data) < length: + s = os.read(fd, length - len(data)) + if not s: + raise EOFError('unexpected EOF') + data += s + return SIGNED_STRUCT.unpack(data)[0] + +def write_signed(fd, n): + msg = SIGNED_STRUCT.pack(n) + while msg: + nbytes = os.write(fd, msg) + if nbytes == 0: + raise RuntimeError('should not get here') + msg = msg[nbytes:] + +# +# +# + +_forkserver = ForkServer() +ensure_running = _forkserver.ensure_running +get_inherited_fds = _forkserver.get_inherited_fds +connect_to_new_process = _forkserver.connect_to_new_process +set_forkserver_preload = _forkserver.set_forkserver_preload diff --git a/lib/python3.10/site-packages/multiprocess/heap.py b/lib/python3.10/site-packages/multiprocess/heap.py new file mode 100644 index 0000000000000000000000000000000000000000..6217dfe12689b379f2dad6f1e4bc3bbf6af8f60a --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/heap.py @@ -0,0 +1,337 @@ +# +# Module which supports allocation of memory from an mmap +# +# multiprocessing/heap.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import bisect +from collections import defaultdict +import mmap +import os +import sys +import tempfile +import threading + +from .context import reduction, assert_spawning +from . 
import util + +__all__ = ['BufferWrapper'] + +# +# Inheritable class which wraps an mmap, and from which blocks can be allocated +# + +if sys.platform == 'win32': + + import _winapi + + class Arena(object): + """ + A shared memory area backed by anonymous memory (Windows). + """ + + _rand = tempfile._RandomNameSequence() + + def __init__(self, size): + self.size = size + for i in range(100): + name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) + buf = mmap.mmap(-1, size, tagname=name) + if _winapi.GetLastError() == 0: + break + # We have reopened a preexisting mmap. + buf.close() + else: + raise FileExistsError('Cannot find name for new mmap') + self.name = name + self.buffer = buf + self._state = (self.size, self.name) + + def __getstate__(self): + assert_spawning(self) + return self._state + + def __setstate__(self, state): + self.size, self.name = self._state = state + # Reopen existing mmap + self.buffer = mmap.mmap(-1, self.size, tagname=self.name) + # XXX Temporarily preventing buildbot failures while determining + # XXX the correct long-term fix. See issue 23060 + #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS + +else: + + class Arena(object): + """ + A shared memory area backed by a temporary file (POSIX). + """ + + if sys.platform == 'linux': + _dir_candidates = ['/dev/shm'] + else: + _dir_candidates = [] + + def __init__(self, size, fd=-1): + self.size = size + self.fd = fd + if fd == -1: + # Arena is created anew (if fd != -1, it means we're coming + # from rebuild_arena() below) + self.fd, name = tempfile.mkstemp( + prefix='pym-%d-'%os.getpid(), + dir=self._choose_dir(size)) + os.unlink(name) + util.Finalize(self, os.close, (self.fd,)) + os.ftruncate(self.fd, size) + self.buffer = mmap.mmap(self.fd, self.size) + + def _choose_dir(self, size): + # Choose a non-storage backed directory if possible, + # to improve performance + for d in self._dir_candidates: + st = os.statvfs(d) + if st.f_bavail * st.f_frsize >= size: # enough free space? 
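+                    # f_bavail (blocks available to unprivileged callers) times
+                    # f_frsize (the fragment size) is the free space in bytes.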
+ return d + return util.get_temp_dir() + + def reduce_arena(a): + if a.fd == -1: + raise ValueError('Arena is unpicklable because ' + 'forking was enabled when it was created') + return rebuild_arena, (a.size, reduction.DupFd(a.fd)) + + def rebuild_arena(size, dupfd): + return Arena(size, dupfd.detach()) + + reduction.register(Arena, reduce_arena) + +# +# Class allowing allocation of chunks of memory from arenas +# + +class Heap(object): + + # Minimum malloc() alignment + _alignment = 8 + + _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB + _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 + + def __init__(self, size=mmap.PAGESIZE): + self._lastpid = os.getpid() + self._lock = threading.Lock() + # Current arena allocation size + self._size = size + # A sorted list of available block sizes in arenas + self._lengths = [] + + # Free block management: + # - map each block size to a list of `(Arena, start, stop)` blocks + self._len_to_seq = {} + # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block + # starting at that offset + self._start_to_block = {} + # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block + # ending at that offset + self._stop_to_block = {} + + # Map arenas to their `(Arena, start, stop)` blocks in use + self._allocated_blocks = defaultdict(set) + self._arenas = [] + + # List of pending blocks to free - see comment in free() below + self._pending_free_blocks = [] + + # Statistics + self._n_mallocs = 0 + self._n_frees = 0 + + @staticmethod + def _roundup(n, alignment): + # alignment must be a power of 2 + mask = alignment - 1 + return (n + mask) & ~mask + + def _new_arena(self, size): + # Create a new arena with at least the given *size* + length = self._roundup(max(self._size, size), mmap.PAGESIZE) + # We carve larger and larger arenas, for efficiency, until we + # reach a large-ish size (roughly L3 cache-sized) + if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: + self._size *= 2 + util.info('allocating a new mmap of length %d', length) + arena = Arena(length) + self._arenas.append(arena) + return (arena, 0, length) + + def _discard_arena(self, arena): + # Possibly delete the given (unused) arena + length = arena.size + # Reusing an existing arena is faster than creating a new one, so + # we only reclaim space if it's large enough. 
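+        # Note (descriptive comment, not in the upstream source): arenas
+        # smaller than _DISCARD_FREE_SPACE_LARGER_THAN (4 MiB) are kept
+        # mapped for reuse; only larger ones are actually dropped below.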
+ if length < self._DISCARD_FREE_SPACE_LARGER_THAN: + return + blocks = self._allocated_blocks.pop(arena) + assert not blocks + del self._start_to_block[(arena, 0)] + del self._stop_to_block[(arena, length)] + self._arenas.remove(arena) + seq = self._len_to_seq[length] + seq.remove((arena, 0, length)) + if not seq: + del self._len_to_seq[length] + self._lengths.remove(length) + + def _malloc(self, size): + # returns a large enough block -- it might be much larger + i = bisect.bisect_left(self._lengths, size) + if i == len(self._lengths): + return self._new_arena(size) + else: + length = self._lengths[i] + seq = self._len_to_seq[length] + block = seq.pop() + if not seq: + del self._len_to_seq[length], self._lengths[i] + + (arena, start, stop) = block + del self._start_to_block[(arena, start)] + del self._stop_to_block[(arena, stop)] + return block + + def _add_free_block(self, block): + # make block available and try to merge with its neighbours in the arena + (arena, start, stop) = block + + try: + prev_block = self._stop_to_block[(arena, start)] + except KeyError: + pass + else: + start, _ = self._absorb(prev_block) + + try: + next_block = self._start_to_block[(arena, stop)] + except KeyError: + pass + else: + _, stop = self._absorb(next_block) + + block = (arena, start, stop) + length = stop - start + + try: + self._len_to_seq[length].append(block) + except KeyError: + self._len_to_seq[length] = [block] + bisect.insort(self._lengths, length) + + self._start_to_block[(arena, start)] = block + self._stop_to_block[(arena, stop)] = block + + def _absorb(self, block): + # deregister this block so it can be merged with a neighbour + (arena, start, stop) = block + del self._start_to_block[(arena, start)] + del self._stop_to_block[(arena, stop)] + + length = stop - start + seq = self._len_to_seq[length] + seq.remove(block) + if not seq: + del self._len_to_seq[length] + self._lengths.remove(length) + + return start, stop + + def _remove_allocated_block(self, block): + arena, start, stop = block + blocks = self._allocated_blocks[arena] + blocks.remove((start, stop)) + if not blocks: + # Arena is entirely free, discard it from this process + self._discard_arena(arena) + + def _free_pending_blocks(self): + # Free all the blocks in the pending list - called with the lock held. + while True: + try: + block = self._pending_free_blocks.pop() + except IndexError: + break + self._add_free_block(block) + self._remove_allocated_block(block) + + def free(self, block): + # free a block returned by malloc() + # Since free() can be called asynchronously by the GC, it could happen + # that it's called while self._lock is held: in that case, + # self._lock.acquire() would deadlock (issue #12352). To avoid that, a + # trylock is used instead, and if the lock can't be acquired + # immediately, the block is added to a list of blocks to be freed + # synchronously sometimes later from malloc() or free(), by calling + # _free_pending_blocks() (appending and retrieving from a list is not + # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
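+        # For example, a BufferWrapper finalizer run by the garbage collector
+        # while malloc() holds the lock simply appends its block to
+        # _pending_free_blocks; that block is then reclaimed by the next call
+        # to malloc() or free() that does acquire the lock.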
+ if os.getpid() != self._lastpid: + raise ValueError( + "My pid ({0:n}) is not last pid {1:n}".format( + os.getpid(),self._lastpid)) + if not self._lock.acquire(False): + # can't acquire the lock right now, add the block to the list of + # pending blocks to free + self._pending_free_blocks.append(block) + else: + # we hold the lock + try: + self._n_frees += 1 + self._free_pending_blocks() + self._add_free_block(block) + self._remove_allocated_block(block) + finally: + self._lock.release() + + def malloc(self, size): + # return a block of right size (possibly rounded up) + if size < 0: + raise ValueError("Size {0:n} out of range".format(size)) + if sys.maxsize <= size: + raise OverflowError("Size {0:n} too large".format(size)) + if os.getpid() != self._lastpid: + self.__init__() # reinitialize after fork + with self._lock: + self._n_mallocs += 1 + # allow pending blocks to be marked available + self._free_pending_blocks() + size = self._roundup(max(size, 1), self._alignment) + (arena, start, stop) = self._malloc(size) + real_stop = start + size + if real_stop < stop: + # if the returned block is larger than necessary, mark + # the remainder available + self._add_free_block((arena, real_stop, stop)) + self._allocated_blocks[arena].add((start, real_stop)) + return (arena, start, real_stop) + +# +# Class wrapping a block allocated out of a Heap -- can be inherited by child process +# + +class BufferWrapper(object): + + _heap = Heap() + + def __init__(self, size): + if size < 0: + raise ValueError("Size {0:n} out of range".format(size)) + if sys.maxsize <= size: + raise OverflowError("Size {0:n} too large".format(size)) + block = BufferWrapper._heap.malloc(size) + self._state = (block, size) + util.Finalize(self, BufferWrapper._heap.free, args=(block,)) + + def create_memoryview(self): + (arena, start, stop), size = self._state + return memoryview(arena.buffer)[start:start+size] diff --git a/lib/python3.10/site-packages/multiprocess/managers.py b/lib/python3.10/site-packages/multiprocess/managers.py new file mode 100644 index 0000000000000000000000000000000000000000..cf4e69d211d211c23852ee68d02312b61087c47d --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/managers.py @@ -0,0 +1,1369 @@ +# +# Module providing manager classes for dealing +# with shared objects +# +# multiprocessing/managers.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token', + 'SharedMemoryManager' ] + +# +# Imports +# + +import sys +import threading +import signal +import array +import queue +import time +import types +import os +from os import getpid + +from traceback import format_exc + +from . import connection +from .context import reduction, get_spawning_popen, ProcessError +from . import pool +from . import process +from . import util +from . import get_context +try: + from . 
import shared_memory + HAS_SHMEM = True +except ImportError: + HAS_SHMEM = False + +# +# Register some things for pickling +# + +def reduce_array(a): + return array.array, (a.typecode, a.tobytes()) +reduction.register(array.array, reduce_array) + +view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] +if view_types[0] is not list: # only needed in Py3.0 + def rebuild_as_list(obj): + return list, (list(obj),) + for view_type in view_types: + reduction.register(view_type, rebuild_as_list) + +# +# Type for identifying shared objects +# + +class Token(object): + ''' + Type to uniquely identify a shared object + ''' + __slots__ = ('typeid', 'address', 'id') + + def __init__(self, typeid, address, id): + (self.typeid, self.address, self.id) = (typeid, address, id) + + def __getstate__(self): + return (self.typeid, self.address, self.id) + + def __setstate__(self, state): + (self.typeid, self.address, self.id) = state + + def __repr__(self): + return '%s(typeid=%r, address=%r, id=%r)' % \ + (self.__class__.__name__, self.typeid, self.address, self.id) + +# +# Function for communication with a manager's server process +# + +def dispatch(c, id, methodname, args=(), kwds={}): + ''' + Send a message to manager using connection `c` and return response + ''' + c.send((id, methodname, args, kwds)) + kind, result = c.recv() + if kind == '#RETURN': + return result + raise convert_to_error(kind, result) + +def convert_to_error(kind, result): + if kind == '#ERROR': + return result + elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): + if not isinstance(result, str): + raise TypeError( + "Result {0!r} (kind '{1}') type is {2}, not str".format( + result, kind, type(result))) + if kind == '#UNSERIALIZABLE': + return RemoteError('Unserializable message: %s\n' % result) + else: + return RemoteError(result) + else: + return ValueError('Unrecognized message type {!r}'.format(kind)) + +class RemoteError(Exception): + def __str__(self): + return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) + +# +# Functions for finding the method names of an object +# + +def all_methods(obj): + ''' + Return a list of names of methods of `obj` + ''' + temp = [] + for name in dir(obj): + func = getattr(obj, name) + if callable(func): + temp.append(name) + return temp + +def public_methods(obj): + ''' + Return a list of names of methods of `obj` which do not start with '_' + ''' + return [name for name in all_methods(obj) if name[0] != '_'] + +# +# Server which is run in a process controlled by a manager +# + +class Server(object): + ''' + Server class which runs in a process controlled by a manager object + ''' + public = ['shutdown', 'create', 'accept_connection', 'get_methods', + 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] + + def __init__(self, registry, address, authkey, serializer): + if not isinstance(authkey, bytes): + raise TypeError( + "Authkey {0!r} is type {1!s}, not bytes".format( + authkey, type(authkey))) + self.registry = registry + self.authkey = process.AuthenticationString(authkey) + Listener, Client = listener_client[serializer] + + # do authentication later + self.listener = Listener(address=address, backlog=16) + self.address = self.listener.address + + self.id_to_obj = {'0': (None, ())} + self.id_to_refcount = {} + self.id_to_local_proxy_obj = {} + self.mutex = threading.Lock() + + def serve_forever(self): + ''' + Run the server forever + ''' + self.stop_event = threading.Event() + process.current_process()._manager_server = self + try: + accepter = 
threading.Thread(target=self.accepter) + accepter.daemon = True + accepter.start() + try: + while not self.stop_event.is_set(): + self.stop_event.wait(1) + except (KeyboardInterrupt, SystemExit): + pass + finally: + if sys.stdout != sys.__stdout__: # what about stderr? + util.debug('resetting stdout, stderr') + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + sys.exit(0) + + def accepter(self): + while True: + try: + c = self.listener.accept() + except OSError: + continue + t = threading.Thread(target=self.handle_request, args=(c,)) + t.daemon = True + t.start() + + def handle_request(self, c): + ''' + Handle a new connection + ''' + funcname = result = request = None + try: + connection.deliver_challenge(c, self.authkey) + connection.answer_challenge(c, self.authkey) + request = c.recv() + ignore, funcname, args, kwds = request + assert funcname in self.public, '%r unrecognized' % funcname + func = getattr(self, funcname) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + try: + result = func(c, *args, **kwds) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + msg = ('#RETURN', result) + try: + c.send(msg) + except Exception as e: + try: + c.send(('#TRACEBACK', format_exc())) + except Exception: + pass + util.info('Failure to send message: %r', msg) + util.info(' ... request was %r', request) + util.info(' ... exception was %r', e) + + c.close() + + def serve_client(self, conn): + ''' + Handle requests from the proxies in a particular process/thread + ''' + util.debug('starting server thread to service %r', + threading.current_thread().name) + + recv = conn.recv + send = conn.send + id_to_obj = self.id_to_obj + + while not self.stop_event.is_set(): + + try: + methodname = obj = None + request = recv() + ident, methodname, args, kwds = request + try: + obj, exposed, gettypeid = id_to_obj[ident] + except KeyError as ke: + try: + obj, exposed, gettypeid = \ + self.id_to_local_proxy_obj[ident] + except KeyError: + raise ke + + if methodname not in exposed: + raise AttributeError( + 'method %r of %r object is not in exposed=%r' % + (methodname, type(obj), exposed) + ) + + function = getattr(obj, methodname) + + try: + res = function(*args, **kwds) + except Exception as e: + msg = ('#ERROR', e) + else: + typeid = gettypeid and gettypeid.get(methodname, None) + if typeid: + rident, rexposed = self.create(conn, typeid, res) + token = Token(typeid, self.address, rident) + msg = ('#PROXY', (rexposed, token)) + else: + msg = ('#RETURN', res) + + except AttributeError: + if methodname is None: + msg = ('#TRACEBACK', format_exc()) + else: + try: + fallback_func = self.fallback_mapping[methodname] + result = fallback_func( + self, conn, ident, obj, *args, **kwds + ) + msg = ('#RETURN', result) + except Exception: + msg = ('#TRACEBACK', format_exc()) + + except EOFError: + util.debug('got EOF -- exiting thread serving %r', + threading.current_thread().name) + sys.exit(0) + + except Exception: + msg = ('#TRACEBACK', format_exc()) + + try: + try: + send(msg) + except Exception: + send(('#UNSERIALIZABLE', format_exc())) + except Exception as e: + util.info('exception in thread serving %r', + threading.current_thread().name) + util.info(' ... message was %r', msg) + util.info(' ... 
exception was %r', e) + conn.close() + sys.exit(1) + + def fallback_getvalue(self, conn, ident, obj): + return obj + + def fallback_str(self, conn, ident, obj): + return str(obj) + + def fallback_repr(self, conn, ident, obj): + return repr(obj) + + fallback_mapping = { + '__str__':fallback_str, + '__repr__':fallback_repr, + '#GETVALUE':fallback_getvalue + } + + def dummy(self, c): + pass + + def debug_info(self, c): + ''' + Return some info --- useful to spot problems with refcounting + ''' + # Perhaps include debug info about 'c'? + with self.mutex: + result = [] + keys = list(self.id_to_refcount.keys()) + keys.sort() + for ident in keys: + if ident != '0': + result.append(' %s: refcount=%s\n %s' % + (ident, self.id_to_refcount[ident], + str(self.id_to_obj[ident][0])[:75])) + return '\n'.join(result) + + def number_of_objects(self, c): + ''' + Number of shared objects + ''' + # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' + return len(self.id_to_refcount) + + def shutdown(self, c): + ''' + Shutdown this process + ''' + try: + util.debug('manager received shutdown message') + c.send(('#RETURN', None)) + except: + import traceback + traceback.print_exc() + finally: + self.stop_event.set() + + def create(self, c, typeid, /, *args, **kwds): + ''' + Create a new shared object and return its id + ''' + with self.mutex: + callable, exposed, method_to_typeid, proxytype = \ + self.registry[typeid] + + if callable is None: + if kwds or (len(args) != 1): + raise ValueError( + "Without callable, must have one non-keyword argument") + obj = args[0] + else: + obj = callable(*args, **kwds) + + if exposed is None: + exposed = public_methods(obj) + if method_to_typeid is not None: + if not isinstance(method_to_typeid, dict): + raise TypeError( + "Method_to_typeid {0!r}: type {1!s}, not dict".format( + method_to_typeid, type(method_to_typeid))) + exposed = list(exposed) + list(method_to_typeid) + + ident = '%x' % id(obj) # convert to string because xmlrpclib + # only has 32 bit signed integers + util.debug('%r callable returned object with id %r', typeid, ident) + + self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) + if ident not in self.id_to_refcount: + self.id_to_refcount[ident] = 0 + + self.incref(c, ident) + return ident, tuple(exposed) + + def get_methods(self, c, token): + ''' + Return the methods of the shared object indicated by token + ''' + return tuple(self.id_to_obj[token.id][1]) + + def accept_connection(self, c, name): + ''' + Spawn a new thread to serve this connection + ''' + threading.current_thread().name = name + c.send(('#RETURN', None)) + self.serve_client(c) + + def incref(self, c, ident): + with self.mutex: + try: + self.id_to_refcount[ident] += 1 + except KeyError as ke: + # If no external references exist but an internal (to the + # manager) still does and a new external reference is created + # from it, restore the manager's tracking of it from the + # previously stashed internal ref. 
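+                # This can happen, for instance, with nested managed objects
+                # (e.g. a managed list stored inside a managed dict): the
+                # inner object may survive only in id_to_local_proxy_obj
+                # until a client obtains a fresh external proxy for it.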
+ if ident in self.id_to_local_proxy_obj: + self.id_to_refcount[ident] = 1 + self.id_to_obj[ident] = \ + self.id_to_local_proxy_obj[ident] + obj, exposed, gettypeid = self.id_to_obj[ident] + util.debug('Server re-enabled tracking & INCREF %r', ident) + else: + raise ke + + def decref(self, c, ident): + if ident not in self.id_to_refcount and \ + ident in self.id_to_local_proxy_obj: + util.debug('Server DECREF skipping %r', ident) + return + + with self.mutex: + if self.id_to_refcount[ident] <= 0: + raise AssertionError( + "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( + ident, self.id_to_obj[ident], + self.id_to_refcount[ident])) + self.id_to_refcount[ident] -= 1 + if self.id_to_refcount[ident] == 0: + del self.id_to_refcount[ident] + + if ident not in self.id_to_refcount: + # Two-step process in case the object turns out to contain other + # proxy objects (e.g. a managed list of managed lists). + # Otherwise, deleting self.id_to_obj[ident] would trigger the + # deleting of the stored value (another managed object) which would + # in turn attempt to acquire the mutex that is already held here. + self.id_to_obj[ident] = (None, (), None) # thread-safe + util.debug('disposing of obj with id %r', ident) + with self.mutex: + del self.id_to_obj[ident] + + +# +# Class to represent state of a manager +# + +class State(object): + __slots__ = ['value'] + INITIAL = 0 + STARTED = 1 + SHUTDOWN = 2 + +# +# Mapping from serializer name to Listener and Client types +# + +listener_client = { #XXX: register dill? + 'pickle' : (connection.Listener, connection.Client), + 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) + } + +# +# Definition of BaseManager +# + +class BaseManager(object): + ''' + Base class for managers + ''' + _registry = {} + _Server = Server + + def __init__(self, address=None, authkey=None, serializer='pickle', + ctx=None): + if authkey is None: + authkey = process.current_process().authkey + self._address = address # XXX not final address if eg ('', 0) + self._authkey = process.AuthenticationString(authkey) + self._state = State() + self._state.value = State.INITIAL + self._serializer = serializer + self._Listener, self._Client = listener_client[serializer] + self._ctx = ctx or get_context() + + def get_server(self): + ''' + Return server object with serve_forever() method and address attribute + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return Server(self._registry, self._address, + self._authkey, self._serializer) + + def connect(self): + ''' + Connect manager object to the server process + ''' + Listener, Client = listener_client[self._serializer] + conn = Client(self._address, authkey=self._authkey) + dispatch(conn, None, 'dummy') + self._state.value = State.STARTED + + def start(self, initializer=None, initargs=()): + ''' + Spawn a server process for this manager object + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + # pipe over which we 
will retrieve address of server + reader, writer = connection.Pipe(duplex=False) + + # spawn process which runs a server + self._process = self._ctx.Process( + target=type(self)._run_server, + args=(self._registry, self._address, self._authkey, + self._serializer, writer, initializer, initargs), + ) + ident = ':'.join(str(i) for i in self._process._identity) + self._process.name = type(self).__name__ + '-' + ident + self._process.start() + + # get address of server + writer.close() + self._address = reader.recv() + reader.close() + + # register a finalizer + self._state.value = State.STARTED + self.shutdown = util.Finalize( + self, type(self)._finalize_manager, + args=(self._process, self._address, self._authkey, + self._state, self._Client), + exitpriority=0 + ) + + @classmethod + def _run_server(cls, registry, address, authkey, serializer, writer, + initializer=None, initargs=()): + ''' + Create a server, report its address and run it + ''' + # bpo-36368: protect server process from KeyboardInterrupt signals + signal.signal(signal.SIGINT, signal.SIG_IGN) + + if initializer is not None: + initializer(*initargs) + + # create server + server = cls._Server(registry, address, authkey, serializer) + + # inform parent process of the server's address + writer.send(server.address) + writer.close() + + # run the manager + util.info('manager serving at %r', server.address) + server.serve_forever() + + def _create(self, typeid, /, *args, **kwds): + ''' + Create a new shared object; return the token and exposed tuple + ''' + assert self._state.value == State.STARTED, 'server not yet started' + conn = self._Client(self._address, authkey=self._authkey) + try: + id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) + finally: + conn.close() + return Token(typeid, self._address, id), exposed + + def join(self, timeout=None): + ''' + Join the manager process (if it has been spawned) + ''' + if self._process is not None: + self._process.join(timeout) + if not self._process.is_alive(): + self._process = None + + def _debug_info(self): + ''' + Return some info about the servers shared objects and connections + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'debug_info') + finally: + conn.close() + + def _number_of_objects(self): + ''' + Return the number of shared objects + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'number_of_objects') + finally: + conn.close() + + def __enter__(self): + if self._state.value == State.INITIAL: + self.start() + if self._state.value != State.STARTED: + if self._state.value == State.INITIAL: + raise ProcessError("Unable to start server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown() + + @staticmethod + def _finalize_manager(process, address, authkey, state, _Client): + ''' + Shutdown the manager process; will be registered as a finalizer + ''' + if process.is_alive(): + util.info('sending shutdown message to manager') + try: + conn = _Client(address, authkey=authkey) + try: + dispatch(conn, None, 'shutdown') + finally: + conn.close() + except Exception: + pass + + process.join(timeout=1.0) + if process.is_alive(): + util.info('manager still alive') + if hasattr(process, 'terminate'): + util.info('trying to `terminate()` manager process') + 
process.terminate() + process.join(timeout=0.1) + if process.is_alive(): + util.info('manager still alive after terminate') + + state.value = State.SHUTDOWN + try: + del BaseProxy._address_to_local[address] + except KeyError: + pass + + @property + def address(self): + return self._address + + @classmethod + def register(cls, typeid, callable=None, proxytype=None, exposed=None, + method_to_typeid=None, create_method=True): + ''' + Register a typeid with the manager type + ''' + if '_registry' not in cls.__dict__: + cls._registry = cls._registry.copy() + + if proxytype is None: + proxytype = AutoProxy + + exposed = exposed or getattr(proxytype, '_exposed_', None) + + method_to_typeid = method_to_typeid or \ + getattr(proxytype, '_method_to_typeid_', None) + + if method_to_typeid: + for key, value in list(method_to_typeid.items()): # isinstance? + assert type(key) is str, '%r is not a string' % key + assert type(value) is str, '%r is not a string' % value + + cls._registry[typeid] = ( + callable, exposed, method_to_typeid, proxytype + ) + + if create_method: + def temp(self, /, *args, **kwds): + util.debug('requesting creation of a shared %r object', typeid) + token, exp = self._create(typeid, *args, **kwds) + proxy = proxytype( + token, self._serializer, manager=self, + authkey=self._authkey, exposed=exp + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + temp.__name__ = typeid + setattr(cls, typeid, temp) + +# +# Subclass of set which get cleared after a fork +# + +class ProcessLocalSet(set): + def __init__(self): + util.register_after_fork(self, lambda obj: obj.clear()) + def __reduce__(self): + return type(self), () + +# +# Definition of BaseProxy +# + +class BaseProxy(object): + ''' + A base for proxies of shared objects + ''' + _address_to_local = {} + _mutex = util.ForkAwareThreadLock() + + def __init__(self, token, serializer, manager=None, + authkey=None, exposed=None, incref=True, manager_owned=False): + with BaseProxy._mutex: + tls_idset = BaseProxy._address_to_local.get(token.address, None) + if tls_idset is None: + tls_idset = util.ForkAwareLocal(), ProcessLocalSet() + BaseProxy._address_to_local[token.address] = tls_idset + + # self._tls is used to record the connection used by this + # thread to communicate with the manager at token.address + self._tls = tls_idset[0] + + # self._idset is used to record the identities of all shared + # objects for which the current process owns references and + # which are in the manager at token.address + self._idset = tls_idset[1] + + self._token = token + self._id = self._token.id + self._manager = manager + self._serializer = serializer + self._Client = listener_client[serializer][1] + + # Should be set to True only when a proxy object is being created + # on the manager server; primary use case: nested proxy objects. + # RebuildProxy detects when a proxy is being created on the manager + # and sets this value appropriately. 
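+        # When True, _incref() below becomes a no-op, so the manager's own
+        # copy of the proxy does not inflate the server-side reference count
+        # kept for the referent.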
+ self._owned_by_manager = manager_owned + + if authkey is not None: + self._authkey = process.AuthenticationString(authkey) + elif self._manager is not None: + self._authkey = self._manager._authkey + else: + self._authkey = process.current_process().authkey + + if incref: + self._incref() + + util.register_after_fork(self, BaseProxy._after_fork) + + def _connect(self): + util.debug('making connection to manager') + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'accept_connection', (name,)) + self._tls.connection = conn + + def _callmethod(self, methodname, args=(), kwds={}): + ''' + Try to call a method of the referent and return a copy of the result + ''' + try: + conn = self._tls.connection + except AttributeError: + util.debug('thread %r does not own a connection', + threading.current_thread().name) + self._connect() + conn = self._tls.connection + + conn.send((self._id, methodname, args, kwds)) + kind, result = conn.recv() + + if kind == '#RETURN': + return result + elif kind == '#PROXY': + exposed, token = result + proxytype = self._manager._registry[token.typeid][-1] + token.address = self._token.address + proxy = proxytype( + token, self._serializer, manager=self._manager, + authkey=self._authkey, exposed=exposed + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + raise convert_to_error(kind, result) + + def _getvalue(self): + ''' + Get a copy of the value of the referent + ''' + return self._callmethod('#GETVALUE') + + def _incref(self): + if self._owned_by_manager: + util.debug('owned_by_manager skipped INCREF of %r', self._token.id) + return + + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'incref', (self._id,)) + util.debug('INCREF %r', self._token.id) + + self._idset.add(self._id) + + state = self._manager and self._manager._state + + self._close = util.Finalize( + self, BaseProxy._decref, + args=(self._token, self._authkey, state, + self._tls, self._idset, self._Client), + exitpriority=10 + ) + + @staticmethod + def _decref(token, authkey, state, tls, idset, _Client): + idset.discard(token.id) + + # check whether manager is still alive + if state is None or state.value == State.STARTED: + # tell manager this process no longer cares about referent + try: + util.debug('DECREF %r', token.id) + conn = _Client(token.address, authkey=authkey) + dispatch(conn, None, 'decref', (token.id,)) + except Exception as e: + util.debug('... 
decref failed %s', e) + + else: + util.debug('DECREF %r -- manager already shutdown', token.id) + + # check whether we can close this thread's connection because + # the process owns no more references to objects for this manager + if not idset and hasattr(tls, 'connection'): + util.debug('thread %r has no more proxies so closing conn', + threading.current_thread().name) + tls.connection.close() + del tls.connection + + def _after_fork(self): + self._manager = None + try: + self._incref() + except Exception as e: + # the proxy may just be for a manager which has shutdown + util.info('incref failed: %s' % e) + + def __reduce__(self): + kwds = {} + if get_spawning_popen() is not None: + kwds['authkey'] = self._authkey + + if getattr(self, '_isauto', False): + kwds['exposed'] = self._exposed_ + return (RebuildProxy, + (AutoProxy, self._token, self._serializer, kwds)) + else: + return (RebuildProxy, + (type(self), self._token, self._serializer, kwds)) + + def __deepcopy__(self, memo): + return self._getvalue() + + def __repr__(self): + return '<%s object, typeid %r at %#x>' % \ + (type(self).__name__, self._token.typeid, id(self)) + + def __str__(self): + ''' + Return representation of the referent (or a fall-back if that fails) + ''' + try: + return self._callmethod('__repr__') + except Exception: + return repr(self)[:-1] + "; '__str__()' failed>" + +# +# Function used for unpickling +# + +def RebuildProxy(func, token, serializer, kwds): + ''' + Function used for unpickling proxy objects. + ''' + server = getattr(process.current_process(), '_manager_server', None) + if server and server.address == token.address: + util.debug('Rebuild a proxy owned by manager, token=%r', token) + kwds['manager_owned'] = True + if token.id not in server.id_to_local_proxy_obj: + server.id_to_local_proxy_obj[token.id] = \ + server.id_to_obj[token.id] + incref = ( + kwds.pop('incref', True) and + not getattr(process.current_process(), '_inheriting', False) + ) + return func(token, serializer, incref=incref, **kwds) + +# +# Functions to create proxies and proxy types +# + +def MakeProxyType(name, exposed, _cache={}): + ''' + Return a proxy type whose methods are given by `exposed` + ''' + exposed = tuple(exposed) + try: + return _cache[(name, exposed)] + except KeyError: + pass + + dic = {} + + for meth in exposed: + exec('''def %s(self, /, *args, **kwds): + return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) + + ProxyType = type(name, (BaseProxy,), dic) + ProxyType._exposed_ = exposed + _cache[(name, exposed)] = ProxyType + return ProxyType + + +def AutoProxy(token, serializer, manager=None, authkey=None, + exposed=None, incref=True): + ''' + Return an auto-proxy for `token` + ''' + _Client = listener_client[serializer][1] + + if exposed is None: + conn = _Client(token.address, authkey=authkey) + try: + exposed = dispatch(conn, None, 'get_methods', (token,)) + finally: + conn.close() + + if authkey is None and manager is not None: + authkey = manager._authkey + if authkey is None: + authkey = process.current_process().authkey + + ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) + proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, + incref=incref) + proxy._isauto = True + return proxy + +# +# Types/callables which we will register with SyncManager +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not 
name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + def get(self): + return self._value + def set(self, value): + self._value = value + def __repr__(self): + return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) + value = property(get, set) + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +# +# Proxy types used by SyncManager +# + +class IteratorProxy(BaseProxy): + _exposed_ = ('__next__', 'send', 'throw', 'close') + def __iter__(self): + return self + def __next__(self, *args): + return self._callmethod('__next__', args) + def send(self, *args): + return self._callmethod('send', args) + def throw(self, *args): + return self._callmethod('throw', args) + def close(self, *args): + return self._callmethod('close', args) + + +class AcquirerProxy(BaseProxy): + _exposed_ = ('acquire', 'release') + def acquire(self, blocking=True, timeout=None): + args = (blocking,) if timeout is None else (blocking, timeout) + return self._callmethod('acquire', args) + def release(self): + return self._callmethod('release') + def __enter__(self): + return self._callmethod('acquire') + def __exit__(self, exc_type, exc_val, exc_tb): + return self._callmethod('release') + + +class ConditionProxy(AcquirerProxy): + _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def notify(self, n=1): + return self._callmethod('notify', (n,)) + def notify_all(self): + return self._callmethod('notify_all') + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = getattr(time,'monotonic',time.time)() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - getattr(time,'monotonic',time.time)() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +class EventProxy(BaseProxy): + _exposed_ = ('is_set', 'set', 'clear', 'wait') + def is_set(self): + return self._callmethod('is_set') + def set(self): + return self._callmethod('set') + def clear(self): + return self._callmethod('clear') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + + +class BarrierProxy(BaseProxy): + _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def abort(self): + return self._callmethod('abort') + def reset(self): + return self._callmethod('reset') + @property + def parties(self): + return self._callmethod('__getattribute__', ('parties',)) + @property + def n_waiting(self): + return self._callmethod('__getattribute__', ('n_waiting',)) + @property + def broken(self): + return self._callmethod('__getattribute__', ('broken',)) + + +class NamespaceProxy(BaseProxy): + _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') + def __getattr__(self, key): + if key[0] == '_': + return object.__getattribute__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__getattribute__', (key,)) + def __setattr__(self, key, value): + if key[0] == '_': + return object.__setattr__(self, key, value) + callmethod = object.__getattribute__(self, '_callmethod') + return 
callmethod('__setattr__', (key, value)) + def __delattr__(self, key): + if key[0] == '_': + return object.__delattr__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__delattr__', (key,)) + + +class ValueProxy(BaseProxy): + _exposed_ = ('get', 'set') + def get(self): + return self._callmethod('get') + def set(self, value): + return self._callmethod('set', (value,)) + value = property(get, set) + + __class_getitem__ = classmethod(types.GenericAlias) + + +BaseListProxy = MakeProxyType('BaseListProxy', ( + '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', + '__mul__', '__reversed__', '__rmul__', '__setitem__', + 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', + 'reverse', 'sort', '__imul__' + )) +class ListProxy(BaseListProxy): + def __iadd__(self, value): + self._callmethod('extend', (value,)) + return self + def __imul__(self, value): + self._callmethod('__imul__', (value,)) + return self + + +DictProxy = MakeProxyType('DictProxy', ( + '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', + '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', + 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' + )) +DictProxy._method_to_typeid_ = { + '__iter__': 'Iterator', + } + + +ArrayProxy = MakeProxyType('ArrayProxy', ( + '__len__', '__getitem__', '__setitem__' + )) + + +BasePoolProxy = MakeProxyType('PoolProxy', ( + 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', + 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', + )) +BasePoolProxy._method_to_typeid_ = { + 'apply_async': 'AsyncResult', + 'map_async': 'AsyncResult', + 'starmap_async': 'AsyncResult', + 'imap': 'Iterator', + 'imap_unordered': 'Iterator' + } +class PoolProxy(BasePoolProxy): + def __enter__(self): + return self + def __exit__(self, exc_type, exc_val, exc_tb): + self.terminate() + +# +# Definition of SyncManager +# + +class SyncManager(BaseManager): + ''' + Subclass of `BaseManager` which supports a number of shared object types. + + The types registered are those intended for the synchronization + of threads, plus `dict`, `list` and `Namespace`. + + The `multiprocess.Manager()` function creates started instances of + this class. + ''' + +SyncManager.register('Queue', queue.Queue) +SyncManager.register('JoinableQueue', queue.Queue) +SyncManager.register('Event', threading.Event, EventProxy) +SyncManager.register('Lock', threading.Lock, AcquirerProxy) +SyncManager.register('RLock', threading.RLock, AcquirerProxy) +SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) +SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, + AcquirerProxy) +SyncManager.register('Condition', threading.Condition, ConditionProxy) +SyncManager.register('Barrier', threading.Barrier, BarrierProxy) +SyncManager.register('Pool', pool.Pool, PoolProxy) +SyncManager.register('list', list, ListProxy) +SyncManager.register('dict', dict, DictProxy) +SyncManager.register('Value', Value, ValueProxy) +SyncManager.register('Array', Array, ArrayProxy) +SyncManager.register('Namespace', Namespace, NamespaceProxy) + +# types returned by methods of PoolProxy +SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) +SyncManager.register('AsyncResult', create_method=False) + +# +# Definition of SharedMemoryManager and SharedMemoryServer +# + +if HAS_SHMEM: + class _SharedMemoryTracker: + "Manages one or more shared memory segments." 
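+        # Rough usage sketch of this internal helper (names below are
+        # illustrative only, not part of the module):
+        #
+        #   tracker = _SharedMemoryTracker("shm_example_context")
+        #   tracker.register_segment(some_segment.name)
+        #   ...
+        #   tracker.unlink()   # unlinks every segment still being tracked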
+ + def __init__(self, name, segment_names=[]): + self.shared_memory_context_name = name + self.segment_names = segment_names + + def register_segment(self, segment_name): + "Adds the supplied shared memory block name to tracker." + util.debug(f"Register segment {segment_name!r} in pid {getpid()}") + self.segment_names.append(segment_name) + + def destroy_segment(self, segment_name): + """Calls unlink() on the shared memory block with the supplied name + and removes it from the list of blocks being tracked.""" + util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") + self.segment_names.remove(segment_name) + segment = shared_memory.SharedMemory(segment_name) + segment.close() + segment.unlink() + + def unlink(self): + "Calls destroy_segment() on all tracked shared memory blocks." + for segment_name in self.segment_names[:]: + self.destroy_segment(segment_name) + + def __del__(self): + util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") + self.unlink() + + def __getstate__(self): + return (self.shared_memory_context_name, self.segment_names) + + def __setstate__(self, state): + self.__init__(*state) + + + class SharedMemoryServer(Server): + + public = Server.public + \ + ['track_segment', 'release_segment', 'list_segments'] + + def __init__(self, *args, **kwargs): + Server.__init__(self, *args, **kwargs) + address = self.address + # The address of Linux abstract namespaces can be bytes + if isinstance(address, bytes): + address = os.fsdecode(address) + self.shared_memory_context = \ + _SharedMemoryTracker(f"shm_{address}_{getpid()}") + util.debug(f"SharedMemoryServer started by pid {getpid()}") + + def create(self, c, typeid, /, *args, **kwargs): + """Create a new distributed-shared object (not backed by a shared + memory block) and return its id to be used in a Proxy Object.""" + # Unless set up as a shared proxy, don't make shared_memory_context + # a standard part of kwargs. This makes things easier for supplying + # simple functions. + if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): + kwargs['shared_memory_context'] = self.shared_memory_context + return Server.create(self, c, typeid, *args, **kwargs) + + def shutdown(self, c): + "Call unlink() on all tracked shared memory, terminate the Server." + self.shared_memory_context.unlink() + return Server.shutdown(self, c) + + def track_segment(self, c, segment_name): + "Adds the supplied shared memory block name to Server's tracker." + self.shared_memory_context.register_segment(segment_name) + + def release_segment(self, c, segment_name): + """Calls unlink() on the shared memory block with the supplied name + and removes it from the tracker instance inside the Server.""" + self.shared_memory_context.destroy_segment(segment_name) + + def list_segments(self, c): + """Returns a list of names of shared memory blocks that the Server + is currently tracking.""" + return self.shared_memory_context.segment_names + + + class SharedMemoryManager(BaseManager): + """Like SyncManager but uses SharedMemoryServer instead of Server. + + It provides methods for creating and returning SharedMemory instances + and for creating a list-like object (ShareableList) backed by shared + memory. It also provides methods that create and return Proxy Objects + that support synchronization across processes (i.e. multi-process-safe + locks and semaphores). 
+ """ + + _Server = SharedMemoryServer + + def __init__(self, *args, **kwargs): + if os.name == "posix": + # bpo-36867: Ensure the resource_tracker is running before + # launching the manager process, so that concurrent + # shared_memory manipulation both in the manager and in the + # current process does not create two resource_tracker + # processes. + from . import resource_tracker + resource_tracker.ensure_running() + BaseManager.__init__(self, *args, **kwargs) + util.debug(f"{self.__class__.__name__} created by pid {getpid()}") + + def __del__(self): + util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") + pass + + def get_server(self): + 'Better than monkeypatching for now; merge into Server ultimately' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started SharedMemoryServer") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("SharedMemoryManager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self._Server(self._registry, self._address, + self._authkey, self._serializer) + + def SharedMemory(self, size): + """Returns a new SharedMemory instance with the specified size in + bytes, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sms = shared_memory.SharedMemory(None, create=True, size=size) + try: + dispatch(conn, None, 'track_segment', (sms.name,)) + except BaseException as e: + sms.unlink() + raise e + return sms + + def ShareableList(self, sequence): + """Returns a new ShareableList instance populated with the values + from the input sequence, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sl = shared_memory.ShareableList(sequence) + try: + dispatch(conn, None, 'track_segment', (sl.shm.name,)) + except BaseException as e: + sl.shm.unlink() + raise e + return sl diff --git a/lib/python3.10/site-packages/multiprocess/pool.py b/lib/python3.10/site-packages/multiprocess/pool.py new file mode 100644 index 0000000000000000000000000000000000000000..bbe05a550c349cdaaf32f2d849a5b56aaed9a583 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/pool.py @@ -0,0 +1,954 @@ +# +# Module providing the `Pool` class for managing a process pool +# +# multiprocessing/pool.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['Pool', 'ThreadPool'] + +# +# Imports +# + +import collections +import itertools +import os +import queue +import threading +import time +import traceback +import types +import warnings + +# If threading is available then ThreadPool should be provided. Therefore +# we avoid top-level imports which are liable to fail on some systems. +from . import util +from . 
import get_context, TimeoutError +from .connection import wait + +# +# Constants representing the state of a pool +# + +INIT = "INIT" +RUN = "RUN" +CLOSE = "CLOSE" +TERMINATE = "TERMINATE" + +# +# Miscellaneous +# + +job_counter = itertools.count() + +def mapstar(args): + return list(map(*args)) + +def starmapstar(args): + return list(itertools.starmap(args[0], args[1])) + +# +# Hack to embed stringification of remote traceback in local traceback +# + +class RemoteTraceback(Exception): + def __init__(self, tb): + self.tb = tb + def __str__(self): + return self.tb + +class ExceptionWithTraceback: + def __init__(self, exc, tb): + tb = traceback.format_exception(type(exc), exc, tb) + tb = ''.join(tb) + self.exc = exc + self.tb = '\n"""\n%s"""' % tb + def __reduce__(self): + return rebuild_exc, (self.exc, self.tb) + +def rebuild_exc(exc, tb): + exc.__cause__ = RemoteTraceback(tb) + return exc + +# +# Code run by worker processes +# + +class MaybeEncodingError(Exception): + """Wraps possible unpickleable errors, so they can be + safely sent through the socket.""" + + def __init__(self, exc, value): + self.exc = repr(exc) + self.value = repr(value) + super(MaybeEncodingError, self).__init__(self.exc, self.value) + + def __str__(self): + return "Error sending result: '%s'. Reason: '%s'" % (self.value, + self.exc) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self) + + +def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, + wrap_exception=False): + if (maxtasks is not None) and not (isinstance(maxtasks, int) + and maxtasks >= 1): + raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) + put = outqueue.put + get = inqueue.get + if hasattr(inqueue, '_writer'): + inqueue._writer.close() + outqueue._reader.close() + + if initializer is not None: + initializer(*initargs) + + completed = 0 + while maxtasks is None or (maxtasks and completed < maxtasks): + try: + task = get() + except (EOFError, OSError): + util.debug('worker got EOFError or OSError -- exiting') + break + + if task is None: + util.debug('worker got sentinel -- exiting') + break + + job, i, func, args, kwds = task + try: + result = (True, func(*args, **kwds)) + except Exception as e: + if wrap_exception and func is not _helper_reraises_exception: + e = ExceptionWithTraceback(e, e.__traceback__) + result = (False, e) + try: + put((job, i, result)) + except Exception as e: + wrapped = MaybeEncodingError(e, result[1]) + util.debug("Possible encoding error while sending result: %s" % ( + wrapped)) + put((job, i, (False, wrapped))) + + task = job = result = func = args = kwds = None + completed += 1 + util.debug('worker exiting after %d tasks' % completed) + +def _helper_reraises_exception(ex): + 'Pickle-able helper function for use by _guarded_task_generation.' + raise ex + +# +# Class representing a process pool +# + +class _PoolCache(dict): + """ + Class that implements a cache for the Pool class that will notify + the pool management threads every time the cache is emptied. The + notification is done by the use of a queue that is provided when + instantiating the cache. + """ + def __init__(self, /, *args, notifier=None, **kwds): + self.notifier = notifier + super().__init__(*args, **kwds) + + def __delitem__(self, item): + super().__delitem__(item) + + # Notify that the cache is empty. This is important because the + # pool keeps maintaining workers until the cache gets drained. 
This + # eliminates a race condition in which a task is finished after the + # the pool's _handle_workers method has enter another iteration of the + # loop. In this situation, the only event that can wake up the pool + # is the cache to be emptied (no more tasks available). + if not self: + self.notifier.put(None) + +class Pool(object): + ''' + Class which supports an async version of applying functions to arguments. + ''' + _wrap_exception = True + + @staticmethod + def Process(ctx, *args, **kwds): + return ctx.Process(*args, **kwds) + + def __init__(self, processes=None, initializer=None, initargs=(), + maxtasksperchild=None, context=None): + # Attributes initialized early to make sure that they exist in + # __del__() if __init__() raises an exception + self._pool = [] + self._state = INIT + + self._ctx = context or get_context() + self._setup_queues() + self._taskqueue = queue.SimpleQueue() + # The _change_notifier queue exist to wake up self._handle_workers() + # when the cache (self._cache) is empty or when there is a change in + # the _state variable of the thread that runs _handle_workers. + self._change_notifier = self._ctx.SimpleQueue() + self._cache = _PoolCache(notifier=self._change_notifier) + self._maxtasksperchild = maxtasksperchild + self._initializer = initializer + self._initargs = initargs + + if processes is None: + processes = os.cpu_count() or 1 + if processes < 1: + raise ValueError("Number of processes must be at least 1") + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + self._processes = processes + try: + self._repopulate_pool() + except Exception: + for p in self._pool: + if p.exitcode is None: + p.terminate() + for p in self._pool: + p.join() + raise + + sentinels = self._get_sentinels() + + self._worker_handler = threading.Thread( + target=Pool._handle_workers, + args=(self._cache, self._taskqueue, self._ctx, self.Process, + self._processes, self._pool, self._inqueue, self._outqueue, + self._initializer, self._initargs, self._maxtasksperchild, + self._wrap_exception, sentinels, self._change_notifier) + ) + self._worker_handler.daemon = True + self._worker_handler._state = RUN + self._worker_handler.start() + + + self._task_handler = threading.Thread( + target=Pool._handle_tasks, + args=(self._taskqueue, self._quick_put, self._outqueue, + self._pool, self._cache) + ) + self._task_handler.daemon = True + self._task_handler._state = RUN + self._task_handler.start() + + self._result_handler = threading.Thread( + target=Pool._handle_results, + args=(self._outqueue, self._quick_get, self._cache) + ) + self._result_handler.daemon = True + self._result_handler._state = RUN + self._result_handler.start() + + self._terminate = util.Finalize( + self, self._terminate_pool, + args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, + self._change_notifier, self._worker_handler, self._task_handler, + self._result_handler, self._cache), + exitpriority=15 + ) + self._state = RUN + + # Copy globals as function locals to make sure that they are available + # during Python shutdown when the Pool is destroyed. 
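+    # (Hence the `_warn=warnings.warn` and `RUN=RUN` default arguments below:
+    # default values are evaluated once at definition time, so __del__ can
+    # still reach them after module globals have been torn down.)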
+ def __del__(self, _warn=warnings.warn, RUN=RUN): + if self._state == RUN: + _warn(f"unclosed running multiprocessing pool {self!r}", + ResourceWarning, source=self) + if getattr(self, '_change_notifier', None) is not None: + self._change_notifier.put(None) + + def __repr__(self): + cls = self.__class__ + return (f'<{cls.__module__}.{cls.__qualname__} ' + f'state={self._state} ' + f'pool_size={len(self._pool)}>') + + def _get_sentinels(self): + task_queue_sentinels = [self._outqueue._reader] + self_notifier_sentinels = [self._change_notifier._reader] + return [*task_queue_sentinels, *self_notifier_sentinels] + + @staticmethod + def _get_worker_sentinels(workers): + return [worker.sentinel for worker in + workers if hasattr(worker, "sentinel")] + + @staticmethod + def _join_exited_workers(pool): + """Cleanup after any worker processes which have exited due to reaching + their specified lifetime. Returns True if any workers were cleaned up. + """ + cleaned = False + for i in reversed(range(len(pool))): + worker = pool[i] + if worker.exitcode is not None: + # worker exited + util.debug('cleaning up worker %d' % i) + worker.join() + cleaned = True + del pool[i] + return cleaned + + def _repopulate_pool(self): + return self._repopulate_pool_static(self._ctx, self.Process, + self._processes, + self._pool, self._inqueue, + self._outqueue, self._initializer, + self._initargs, + self._maxtasksperchild, + self._wrap_exception) + + @staticmethod + def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, + outqueue, initializer, initargs, + maxtasksperchild, wrap_exception): + """Bring the number of pool processes up to the specified number, + for use after reaping workers which have exited. + """ + for i in range(processes - len(pool)): + w = Process(ctx, target=worker, + args=(inqueue, outqueue, + initializer, + initargs, maxtasksperchild, + wrap_exception)) + w.name = w.name.replace('Process', 'PoolWorker') + w.daemon = True + w.start() + pool.append(w) + util.debug('added worker') + + @staticmethod + def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, + initializer, initargs, maxtasksperchild, + wrap_exception): + """Clean up any exited workers and start replacements for them. + """ + if Pool._join_exited_workers(pool): + Pool._repopulate_pool_static(ctx, Process, processes, pool, + inqueue, outqueue, initializer, + initargs, maxtasksperchild, + wrap_exception) + + def _setup_queues(self): + self._inqueue = self._ctx.SimpleQueue() + self._outqueue = self._ctx.SimpleQueue() + self._quick_put = self._inqueue._writer.send + self._quick_get = self._outqueue._reader.recv + + def _check_running(self): + if self._state != RUN: + raise ValueError("Pool not running") + + def apply(self, func, args=(), kwds={}): + ''' + Equivalent of `func(*args, **kwds)`. + Pool must be running. + ''' + return self.apply_async(func, args, kwds).get() + + def map(self, func, iterable, chunksize=None): + ''' + Apply `func` to each element in `iterable`, collecting the results + in a list that is returned. + ''' + return self._map_async(func, iterable, mapstar, chunksize).get() + + def starmap(self, func, iterable, chunksize=None): + ''' + Like `map()` method but the elements of the `iterable` are expected to + be iterables as well and will be unpacked as arguments. Hence + `func` and (a, b) becomes func(a, b). 
+ ''' + return self._map_async(func, iterable, starmapstar, chunksize).get() + + def starmap_async(self, func, iterable, chunksize=None, callback=None, + error_callback=None): + ''' + Asynchronous version of `starmap()` method. + ''' + return self._map_async(func, iterable, starmapstar, chunksize, + callback, error_callback) + + def _guarded_task_generation(self, result_job, func, iterable): + '''Provides a generator of tasks for imap and imap_unordered with + appropriate handling for iterables which throw exceptions during + iteration.''' + try: + i = -1 + for i, x in enumerate(iterable): + yield (result_job, i, func, (x,), {}) + except Exception as e: + yield (result_job, i+1, _helper_reraises_exception, (e,), {}) + + def imap(self, func, iterable, chunksize=1): + ''' + Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. + ''' + self._check_running() + if chunksize == 1: + result = IMapIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, func, iterable), + result._set_length + )) + return result + else: + if chunksize < 1: + raise ValueError( + "Chunksize must be 1+, not {0:n}".format( + chunksize)) + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = IMapIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapstar, + task_batches), + result._set_length + )) + return (item for chunk in result for item in chunk) + + def imap_unordered(self, func, iterable, chunksize=1): + ''' + Like `imap()` method but ordering of results is arbitrary. + ''' + self._check_running() + if chunksize == 1: + result = IMapUnorderedIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, func, iterable), + result._set_length + )) + return result + else: + if chunksize < 1: + raise ValueError( + "Chunksize must be 1+, not {0!r}".format(chunksize)) + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = IMapUnorderedIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapstar, + task_batches), + result._set_length + )) + return (item for chunk in result for item in chunk) + + def apply_async(self, func, args=(), kwds={}, callback=None, + error_callback=None): + ''' + Asynchronous version of `apply()` method. + ''' + self._check_running() + result = ApplyResult(self, callback, error_callback) + self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) + return result + + def map_async(self, func, iterable, chunksize=None, callback=None, + error_callback=None): + ''' + Asynchronous version of `map()` method. + ''' + return self._map_async(func, iterable, mapstar, chunksize, callback, + error_callback) + + def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, + error_callback=None): + ''' + Helper function to implement map, starmap and their async counterparts. 
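+
+ When `chunksize` is None it is computed from the input size as roughly
+ len(iterable) / (4 * number of workers), rounded up. As an illustrative
+ sketch: 100 items on a pool of 4 workers gives divmod(100, 16) == (6, 4),
+ so a chunksize of 7 is used.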
+ ''' + self._check_running() + if not hasattr(iterable, '__len__'): + iterable = list(iterable) + + if chunksize is None: + chunksize, extra = divmod(len(iterable), len(self._pool) * 4) + if extra: + chunksize += 1 + if len(iterable) == 0: + chunksize = 0 + + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = MapResult(self, chunksize, len(iterable), callback, + error_callback=error_callback) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapper, + task_batches), + None + ) + ) + return result + + @staticmethod + def _wait_for_updates(sentinels, change_notifier, timeout=None): + wait(sentinels, timeout=timeout) + while not change_notifier.empty(): + change_notifier.get() + + @classmethod + def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, + pool, inqueue, outqueue, initializer, initargs, + maxtasksperchild, wrap_exception, sentinels, + change_notifier): + thread = threading.current_thread() + + # Keep maintaining workers until the cache gets drained, unless the pool + # is terminated. + while thread._state == RUN or (cache and thread._state != TERMINATE): + cls._maintain_pool(ctx, Process, processes, pool, inqueue, + outqueue, initializer, initargs, + maxtasksperchild, wrap_exception) + + current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] + + cls._wait_for_updates(current_sentinels, change_notifier) + # send sentinel to stop workers + taskqueue.put(None) + util.debug('worker handler exiting') + + @staticmethod + def _handle_tasks(taskqueue, put, outqueue, pool, cache): + thread = threading.current_thread() + + for taskseq, set_length in iter(taskqueue.get, None): + task = None + try: + # iterating taskseq cannot fail + for task in taskseq: + if thread._state != RUN: + util.debug('task handler found thread._state != RUN') + break + try: + put(task) + except Exception as e: + job, idx = task[:2] + try: + cache[job]._set(idx, (False, e)) + except KeyError: + pass + else: + if set_length: + util.debug('doing set_length()') + idx = task[1] if task else -1 + set_length(idx + 1) + continue + break + finally: + task = taskseq = job = None + else: + util.debug('task handler got sentinel') + + try: + # tell result handler to finish when cache is empty + util.debug('task handler sending sentinel to result handler') + outqueue.put(None) + + # tell workers there is no more work + util.debug('task handler sending sentinel to workers') + for p in pool: + put(None) + except OSError: + util.debug('task handler got OSError when sending sentinels') + + util.debug('task handler exiting') + + @staticmethod + def _handle_results(outqueue, get, cache): + thread = threading.current_thread() + + while 1: + try: + task = get() + except (OSError, EOFError): + util.debug('result handler got EOFError/OSError -- exiting') + return + + if thread._state != RUN: + assert thread._state == TERMINATE, "Thread not in TERMINATE" + util.debug('result handler found thread._state=TERMINATE') + break + + if task is None: + util.debug('result handler got sentinel') + break + + job, i, obj = task + try: + cache[job]._set(i, obj) + except KeyError: + pass + task = job = obj = None + + while cache and thread._state != TERMINATE: + try: + task = get() + except (OSError, EOFError): + util.debug('result handler got EOFError/OSError -- exiting') + return + + if task is None: + util.debug('result handler ignoring extra sentinel') + continue + job, i, obj = task + try: + cache[job]._set(i, obj) + except KeyError: + pass + task = job = obj = None + + if 
hasattr(outqueue, '_reader'):
+ util.debug('ensuring that outqueue is not full')
+ # If we don't make room available in outqueue then
+ # attempts to add the sentinel (None) to outqueue may
+ # block. There is guaranteed to be no more than 2 sentinels.
+ try:
+ for i in range(10):
+ if not outqueue._reader.poll():
+ break
+ get()
+ except (OSError, EOFError):
+ pass
+
+ util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
+ len(cache), thread._state)
+
+ @staticmethod
+ def _get_tasks(func, it, size):
+ it = iter(it)
+ while 1:
+ x = tuple(itertools.islice(it, size))
+ if not x:
+ return
+ yield (func, x)
+
+ def __reduce__(self):
+ raise NotImplementedError(
+ 'pool objects cannot be passed between processes or pickled'
+ )
+
+ def close(self):
+ util.debug('closing pool')
+ if self._state == RUN:
+ self._state = CLOSE
+ self._worker_handler._state = CLOSE
+ self._change_notifier.put(None)
+
+ def terminate(self):
+ util.debug('terminating pool')
+ self._state = TERMINATE
+ self._terminate()
+
+ def join(self):
+ util.debug('joining pool')
+ if self._state == RUN:
+ raise ValueError("Pool is still running")
+ elif self._state not in (CLOSE, TERMINATE):
+ raise ValueError("In unknown state")
+ self._worker_handler.join()
+ self._task_handler.join()
+ self._result_handler.join()
+ for p in self._pool:
+ p.join()
+
+ @staticmethod
+ def _help_stuff_finish(inqueue, task_handler, size):
+ # task_handler may be blocked trying to put items on inqueue
+ util.debug('removing tasks from inqueue until task handler finished')
+ inqueue._rlock.acquire()
+ while task_handler.is_alive() and inqueue._reader.poll():
+ inqueue._reader.recv()
+ time.sleep(0)
+
+ @classmethod
+ def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier,
+ worker_handler, task_handler, result_handler, cache):
+ # this is guaranteed to only be called once
+ util.debug('finalizing pool')
+
+ # Notify that the worker_handler state has been changed so the
+ # _handle_workers loop can be unblocked (and exited) in order to
+ # send the finalization sentinel to all the workers.
+ worker_handler._state = TERMINATE
+ change_notifier.put(None)
+
+ task_handler._state = TERMINATE
+
+ util.debug('helping task handler/workers to finish')
+ cls._help_stuff_finish(inqueue, task_handler, len(pool))
+
+ if (not result_handler.is_alive()) and (len(cache) != 0):
+ raise AssertionError(
+ "Cannot have cache with result_handler not alive")
+
+ result_handler._state = TERMINATE
+ change_notifier.put(None)
+ outqueue.put(None) # sentinel
+
+ # We must wait for the worker handler to exit before terminating
+ # workers because we don't want workers to be restarted behind our back.
+ util.debug('joining worker handler')
+ if threading.current_thread() is not worker_handler:
+ worker_handler.join()
+
+ # Terminate workers which haven't already finished.
+ if pool and hasattr(pool[0], 'terminate'): + util.debug('terminating workers') + for p in pool: + if p.exitcode is None: + p.terminate() + + util.debug('joining task handler') + if threading.current_thread() is not task_handler: + task_handler.join() + + util.debug('joining result handler') + if threading.current_thread() is not result_handler: + result_handler.join() + + if pool and hasattr(pool[0], 'terminate'): + util.debug('joining pool workers') + for p in pool: + if p.is_alive(): + # worker has not yet exited + util.debug('cleaning up worker %d' % p.pid) + p.join() + + def __enter__(self): + self._check_running() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.terminate() + +# +# Class whose instances are returned by `Pool.apply_async()` +# + +class ApplyResult(object): + + def __init__(self, pool, callback, error_callback): + self._pool = pool + self._event = threading.Event() + self._job = next(job_counter) + self._cache = pool._cache + self._callback = callback + self._error_callback = error_callback + self._cache[self._job] = self + + def ready(self): + return self._event.is_set() + + def successful(self): + if not self.ready(): + raise ValueError("{0!r} not ready".format(self)) + return self._success + + def wait(self, timeout=None): + self._event.wait(timeout) + + def get(self, timeout=None): + self.wait(timeout) + if not self.ready(): + raise TimeoutError + if self._success: + return self._value + else: + raise self._value + + def _set(self, i, obj): + self._success, self._value = obj + if self._callback and self._success: + self._callback(self._value) + if self._error_callback and not self._success: + self._error_callback(self._value) + self._event.set() + del self._cache[self._job] + self._pool = None + + __class_getitem__ = classmethod(types.GenericAlias) + +AsyncResult = ApplyResult # create alias -- see #17805 + +# +# Class whose instances are returned by `Pool.map_async()` +# + +class MapResult(ApplyResult): + + def __init__(self, pool, chunksize, length, callback, error_callback): + ApplyResult.__init__(self, pool, callback, + error_callback=error_callback) + self._success = True + self._value = [None] * length + self._chunksize = chunksize + if chunksize <= 0: + self._number_left = 0 + self._event.set() + del self._cache[self._job] + else: + self._number_left = length//chunksize + bool(length % chunksize) + + def _set(self, i, success_result): + self._number_left -= 1 + success, result = success_result + if success and self._success: + self._value[i*self._chunksize:(i+1)*self._chunksize] = result + if self._number_left == 0: + if self._callback: + self._callback(self._value) + del self._cache[self._job] + self._event.set() + self._pool = None + else: + if not success and self._success: + # only store first exception + self._success = False + self._value = result + if self._number_left == 0: + # only consider the result ready once all jobs are done + if self._error_callback: + self._error_callback(self._value) + del self._cache[self._job] + self._event.set() + self._pool = None + +# +# Class whose instances are returned by `Pool.imap()` +# + +class IMapIterator(object): + + def __init__(self, pool): + self._pool = pool + self._cond = threading.Condition(threading.Lock()) + self._job = next(job_counter) + self._cache = pool._cache + self._items = collections.deque() + self._index = 0 + self._length = None + self._unsorted = {} + self._cache[self._job] = self + + def __iter__(self): + return self + + def next(self, timeout=None): + with 
self._cond: + try: + item = self._items.popleft() + except IndexError: + if self._index == self._length: + self._pool = None + raise StopIteration from None + self._cond.wait(timeout) + try: + item = self._items.popleft() + except IndexError: + if self._index == self._length: + self._pool = None + raise StopIteration from None + raise TimeoutError from None + + success, value = item + if success: + return value + raise value + + __next__ = next # XXX + + def _set(self, i, obj): + with self._cond: + if self._index == i: + self._items.append(obj) + self._index += 1 + while self._index in self._unsorted: + obj = self._unsorted.pop(self._index) + self._items.append(obj) + self._index += 1 + self._cond.notify() + else: + self._unsorted[i] = obj + + if self._index == self._length: + del self._cache[self._job] + self._pool = None + + def _set_length(self, length): + with self._cond: + self._length = length + if self._index == self._length: + self._cond.notify() + del self._cache[self._job] + self._pool = None + +# +# Class whose instances are returned by `Pool.imap_unordered()` +# + +class IMapUnorderedIterator(IMapIterator): + + def _set(self, i, obj): + with self._cond: + self._items.append(obj) + self._index += 1 + self._cond.notify() + if self._index == self._length: + del self._cache[self._job] + self._pool = None + +# +# +# + +class ThreadPool(Pool): + _wrap_exception = False + + @staticmethod + def Process(ctx, *args, **kwds): + from .dummy import Process + return Process(*args, **kwds) + + def __init__(self, processes=None, initializer=None, initargs=()): + Pool.__init__(self, processes, initializer, initargs) + + def _setup_queues(self): + self._inqueue = queue.SimpleQueue() + self._outqueue = queue.SimpleQueue() + self._quick_put = self._inqueue.put + self._quick_get = self._outqueue.get + + def _get_sentinels(self): + return [self._change_notifier._reader] + + @staticmethod + def _get_worker_sentinels(workers): + return [] + + @staticmethod + def _help_stuff_finish(inqueue, task_handler, size): + # drain inqueue, and put sentinels at its head to make workers finish + try: + while True: + inqueue.get(block=False) + except queue.Empty: + pass + for i in range(size): + inqueue.put(None) + + def _wait_for_updates(self, sentinels, change_notifier, timeout): + time.sleep(timeout) diff --git a/lib/python3.10/site-packages/multiprocess/popen_forkserver.py b/lib/python3.10/site-packages/multiprocess/popen_forkserver.py new file mode 100644 index 0000000000000000000000000000000000000000..2cea1fc7a4602172b87df391fdc831c9d40ac49a --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/popen_forkserver.py @@ -0,0 +1,74 @@ +import io +import os + +from .context import reduction, set_spawning_popen +if not reduction.HAVE_SEND_HANDLE: + raise ImportError('No support for sending fds between processes') +from . import forkserver +from . import popen_fork +from . import spawn +from . 
import util + + +__all__ = ['Popen'] + +# +# Wrapper for an fd used while launching a process +# + +class _DupFd(object): + def __init__(self, ind): + self.ind = ind + def detach(self): + return forkserver.get_inherited_fds()[self.ind] + +# +# Start child process using a server process +# + +class Popen(popen_fork.Popen): + method = 'forkserver' + DupFd = _DupFd + + def __init__(self, process_obj): + self._fds = [] + super().__init__(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return len(self._fds) - 1 + + def _launch(self, process_obj): + prep_data = spawn.get_preparation_data(process_obj._name) + buf = io.BytesIO() + set_spawning_popen(self) + try: + reduction.dump(prep_data, buf) + reduction.dump(process_obj, buf) + finally: + set_spawning_popen(None) + + self.sentinel, w = forkserver.connect_to_new_process(self._fds) + # Keep a duplicate of the data pipe's write end as a sentinel of the + # parent process used by the child process. + _parent_w = os.dup(w) + self.finalizer = util.Finalize(self, util.close_fds, + (_parent_w, self.sentinel)) + with open(w, 'wb', closefd=True) as f: + f.write(buf.getbuffer()) + self.pid = forkserver.read_signed(self.sentinel) + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + from multiprocess.connection import wait + timeout = 0 if flag == os.WNOHANG else None + if not wait([self.sentinel], timeout): + return None + try: + self.returncode = forkserver.read_signed(self.sentinel) + except (OSError, EOFError): + # This should not happen usually, but perhaps the forkserver + # process itself got killed + self.returncode = 255 + + return self.returncode diff --git a/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py b/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py new file mode 100644 index 0000000000000000000000000000000000000000..24b8634523e5f2c29cd8bb21022c26d22a4fb13b --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py @@ -0,0 +1,72 @@ +import io +import os + +from .context import reduction, set_spawning_popen +from . import popen_fork +from . import spawn +from . import util + +__all__ = ['Popen'] + + +# +# Wrapper for an fd used while launching a process +# + +class _DupFd(object): + def __init__(self, fd): + self.fd = fd + def detach(self): + return self.fd + +# +# Start child process using a fresh interpreter +# + +class Popen(popen_fork.Popen): + method = 'spawn' + DupFd = _DupFd + + def __init__(self, process_obj): + self._fds = [] + super().__init__(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return fd + + def _launch(self, process_obj): + from . 
import resource_tracker + tracker_fd = resource_tracker.getfd() + self._fds.append(tracker_fd) + prep_data = spawn.get_preparation_data(process_obj._name) + fp = io.BytesIO() + set_spawning_popen(self) + try: + reduction.dump(prep_data, fp) + reduction.dump(process_obj, fp) + finally: + set_spawning_popen(None) + + parent_r = child_w = child_r = parent_w = None + try: + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + cmd = spawn.get_command_line(tracker_fd=tracker_fd, + pipe_handle=child_r) + self._fds.extend([child_r, child_w]) + self.pid = util.spawnv_passfds(spawn.get_executable(), + cmd, self._fds) + self.sentinel = parent_r + with open(parent_w, 'wb', closefd=False) as f: + f.write(fp.getbuffer()) + finally: + fds_to_close = [] + for fd in (parent_r, parent_w): + if fd is not None: + fds_to_close.append(fd) + self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) + + for fd in (child_r, child_w): + if fd is not None: + os.close(fd) diff --git a/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py b/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py new file mode 100644 index 0000000000000000000000000000000000000000..9c4098d0fa4f1e6e3ec94ecc8e596dd3857d741f --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py @@ -0,0 +1,131 @@ +import os +import msvcrt +import signal +import sys +import _winapi + +from .context import reduction, get_spawning_popen, set_spawning_popen +from . import spawn +from . import util + +__all__ = ['Popen'] + +# +# +# + +TERMINATE = 0x10000 +WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) +WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + + +def _path_eq(p1, p2): + return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) + +WINENV = not _path_eq(sys.executable, sys._base_executable) + + +def _close_handles(*handles): + for handle in handles: + _winapi.CloseHandle(handle) + + +# +# We define a Popen class similar to the one from subprocess, but +# whose constructor takes a process object as its argument. +# + +class Popen(object): + ''' + Start a subprocess to run the code of a process object + ''' + method = 'spawn' + + def __init__(self, process_obj): + prep_data = spawn.get_preparation_data(process_obj._name) + + # read end of pipe will be duplicated by the child process + # -- see spawn_main() in spawn.py. + # + # bpo-33929: Previously, the read end of pipe was "stolen" by the child + # process, but it leaked a handle if the child process had been + # terminated before it could steal the handle from the parent process. + rhandle, whandle = _winapi.CreatePipe(None, 0) + wfd = msvcrt.open_osfhandle(whandle, 0) + cmd = spawn.get_command_line(parent_pid=os.getpid(), + pipe_handle=rhandle) + cmd = ' '.join('"%s"' % x for x in cmd) + + python_exe = spawn.get_executable() + + # bpo-35797: When running in a venv, we bypass the redirect + # executor and launch our base Python. 
+ if WINENV and _path_eq(python_exe, sys.executable): + python_exe = sys._base_executable + env = os.environ.copy() + env["__PYVENV_LAUNCHER__"] = sys.executable + else: + env = None + + with open(wfd, 'wb', closefd=True) as to_child: + # start process + try: + hp, ht, pid, tid = _winapi.CreateProcess( + python_exe, cmd, + None, None, False, 0, env, None, None) + _winapi.CloseHandle(ht) + except: + _winapi.CloseHandle(rhandle) + raise + + # set attributes of self + self.pid = pid + self.returncode = None + self._handle = hp + self.sentinel = int(hp) + self.finalizer = util.Finalize(self, _close_handles, + (self.sentinel, int(rhandle))) + + # send information to child + set_spawning_popen(self) + try: + reduction.dump(prep_data, to_child) + reduction.dump(process_obj, to_child) + finally: + set_spawning_popen(None) + + def duplicate_for_child(self, handle): + assert self is get_spawning_popen() + return reduction.duplicate(handle, self.sentinel) + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is None: + msecs = _winapi.INFINITE + else: + msecs = max(0, int(timeout * 1000 + 0.5)) + + res = _winapi.WaitForSingleObject(int(self._handle), msecs) + if res == _winapi.WAIT_OBJECT_0: + code = _winapi.GetExitCodeProcess(self._handle) + if code == TERMINATE: + code = -signal.SIGTERM + self.returncode = code + + return self.returncode + + def poll(self): + return self.wait(timeout=0) + + def terminate(self): + if self.returncode is None: + try: + _winapi.TerminateProcess(int(self._handle), TERMINATE) + except OSError: + if self.wait(timeout=1.0) is None: + raise + + kill = terminate + + def close(self): + self.finalizer() diff --git a/lib/python3.10/site-packages/multiprocess/process.py b/lib/python3.10/site-packages/multiprocess/process.py new file mode 100644 index 0000000000000000000000000000000000000000..0b2e0b45b2397be0a672b36b7e4c572b3996cf53 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/process.py @@ -0,0 +1,432 @@ +# +# Module providing the `Process` class which emulates `threading.Thread` +# +# multiprocessing/process.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
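+#
+# A minimal usage sketch (illustrative only; the names are examples, and the
+# target must be defined at module level so it can be pickled under the spawn
+# and forkserver start methods):
+#
+#     from multiprocess import Process, current_process
+#
+#     def greet():
+#         print('hello from', current_process().name)
+#
+#     if __name__ == '__main__':
+#         p = Process(target=greet, name='Greeter')
+#         p.start()
+#         p.join()
+#         assert p.exitcode == 0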
+# + +__all__ = ['BaseProcess', 'current_process', 'active_children', + 'parent_process'] + +# +# Imports +# + +import os +import sys +import signal +import itertools +import threading +from _weakrefset import WeakSet + +# +# +# + +try: + ORIGINAL_DIR = os.path.abspath(os.getcwd()) +except OSError: + ORIGINAL_DIR = None + +# +# Public functions +# + +def current_process(): + ''' + Return process object representing the current process + ''' + return _current_process + +def active_children(): + ''' + Return list of process objects corresponding to live child processes + ''' + _cleanup() + return list(_children) + + +def parent_process(): + ''' + Return process object representing the parent process + ''' + return _parent_process + +# +# +# + +def _cleanup(): + # check for processes which have finished + for p in list(_children): + if p._popen.poll() is not None: + _children.discard(p) + +# +# The `Process` class +# + +class BaseProcess(object): + ''' + Process objects represent activity that is run in a separate process + + The class is analogous to `threading.Thread` + ''' + def _Popen(self): + raise NotImplementedError + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, + *, daemon=None): + assert group is None, 'group argument must be None for now' + count = next(_process_counter) + self._identity = _current_process._identity + (count,) + self._config = _current_process._config.copy() + self._parent_pid = os.getpid() + self._parent_name = _current_process.name + self._popen = None + self._closed = False + self._target = target + self._args = tuple(args) + self._kwargs = dict(kwargs) + self._name = name or type(self).__name__ + '-' + \ + ':'.join(str(i) for i in self._identity) + if daemon is not None: + self.daemon = daemon + _dangling.add(self) + + def _check_closed(self): + if self._closed: + raise ValueError("process object is closed") + + def run(self): + ''' + Method to be run in sub-process; can be overridden in sub-class + ''' + if self._target: + self._target(*self._args, **self._kwargs) + + def start(self): + ''' + Start child process + ''' + self._check_closed() + assert self._popen is None, 'cannot start a process twice' + assert self._parent_pid == os.getpid(), \ + 'can only start a process object created by current process' + assert not _current_process._config.get('daemon'), \ + 'daemonic processes are not allowed to have children' + _cleanup() + self._popen = self._Popen(self) + self._sentinel = self._popen.sentinel + # Avoid a refcycle if the target function holds an indirect + # reference to the process object (see bpo-30775) + del self._target, self._args, self._kwargs + _children.add(self) + + def terminate(self): + ''' + Terminate process; sends SIGTERM signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.terminate() + + def kill(self): + ''' + Terminate process; sends SIGKILL signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.kill() + + def join(self, timeout=None): + ''' + Wait until child process terminates + ''' + self._check_closed() + assert self._parent_pid == os.getpid(), 'can only join a child process' + assert self._popen is not None, 'can only join a started process' + res = self._popen.wait(timeout) + if res is not None: + _children.discard(self) + + def is_alive(self): + ''' + Return whether process is alive + ''' + self._check_closed() + if self is _current_process: + return True + assert self._parent_pid == os.getpid(), 'can only test a child process' + + if self._popen is 
None: + return False + + returncode = self._popen.poll() + if returncode is None: + return True + else: + _children.discard(self) + return False + + def close(self): + ''' + Close the Process object. + + This method releases resources held by the Process object. It is + an error to call this method if the child process is still running. + ''' + if self._popen is not None: + if self._popen.poll() is None: + raise ValueError("Cannot close a process while it is still running. " + "You should first call join() or terminate().") + self._popen.close() + self._popen = None + del self._sentinel + _children.discard(self) + self._closed = True + + @property + def name(self): + return self._name + + @name.setter + def name(self, name): + assert isinstance(name, str), 'name must be a string' + self._name = name + + @property + def daemon(self): + ''' + Return whether process is a daemon + ''' + return self._config.get('daemon', False) + + @daemon.setter + def daemon(self, daemonic): + ''' + Set whether process is a daemon + ''' + assert self._popen is None, 'process has already started' + self._config['daemon'] = daemonic + + @property + def authkey(self): + return self._config['authkey'] + + @authkey.setter + def authkey(self, authkey): + ''' + Set authorization key of process + ''' + self._config['authkey'] = AuthenticationString(authkey) + + @property + def exitcode(self): + ''' + Return exit code of process or `None` if it has yet to stop + ''' + self._check_closed() + if self._popen is None: + return self._popen + return self._popen.poll() + + @property + def ident(self): + ''' + Return identifier (PID) of process or `None` if it has yet to start + ''' + self._check_closed() + if self is _current_process: + return os.getpid() + else: + return self._popen and self._popen.pid + + pid = ident + + @property + def sentinel(self): + ''' + Return a file descriptor (Unix) or handle (Windows) suitable for + waiting for process termination. + ''' + self._check_closed() + try: + return self._sentinel + except AttributeError: + raise ValueError("process not started") from None + + def __repr__(self): + exitcode = None + if self is _current_process: + status = 'started' + elif self._closed: + status = 'closed' + elif self._parent_pid != os.getpid(): + status = 'unknown' + elif self._popen is None: + status = 'initial' + else: + exitcode = self._popen.poll() + if exitcode is not None: + status = 'stopped' + else: + status = 'started' + + info = [type(self).__name__, 'name=%r' % self._name] + if self._popen is not None: + info.append('pid=%s' % self._popen.pid) + info.append('parent=%s' % self._parent_pid) + info.append(status) + if exitcode is not None: + exitcode = _exitcode_to_name.get(exitcode, exitcode) + info.append('exitcode=%s' % exitcode) + if self.daemon: + info.append('daemon') + return '<%s>' % ' '.join(info) + + ## + + def _bootstrap(self, parent_sentinel=None): + from . 
import util, context + global _current_process, _parent_process, _process_counter, _children + + try: + if self._start_method is not None: + context._force_start_method(self._start_method) + _process_counter = itertools.count(1) + _children = set() + util._close_stdin() + old_process = _current_process + _current_process = self + _parent_process = _ParentProcess( + self._parent_name, self._parent_pid, parent_sentinel) + if threading._HAVE_THREAD_NATIVE_ID: + threading.main_thread()._set_native_id() + try: + util._finalizer_registry.clear() + util._run_after_forkers() + finally: + # delay finalization of the old process object until after + # _run_after_forkers() is executed + del old_process + util.info('child process calling self.run()') + try: + self.run() + exitcode = 0 + finally: + util._exit_function() + except SystemExit as e: + if e.code is None: + exitcode = 0 + elif isinstance(e.code, int): + exitcode = e.code + else: + sys.stderr.write(str(e.code) + '\n') + exitcode = 1 + except: + exitcode = 1 + import traceback + sys.stderr.write('Process %s:\n' % self.name) + traceback.print_exc() + finally: + threading._shutdown() + util.info('process exiting with exitcode %d' % exitcode) + util._flush_std_streams() + + return exitcode + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + +class AuthenticationString(bytes): + def __reduce__(self): + from .context import get_spawning_popen + if get_spawning_popen() is None: + raise TypeError( + 'Pickling an AuthenticationString object is ' + 'disallowed for security reasons' + ) + return AuthenticationString, (bytes(self),) + + +# +# Create object representing the parent process +# + +class _ParentProcess(BaseProcess): + + def __init__(self, name, pid, sentinel): + self._identity = () + self._name = name + self._pid = pid + self._parent_pid = None + self._popen = None + self._closed = False + self._sentinel = sentinel + self._config = {} + + def is_alive(self): + from multiprocessing.connection import wait + return not wait([self._sentinel], timeout=0) + + @property + def ident(self): + return self._pid + + def join(self, timeout=None): + ''' + Wait until parent process terminates + ''' + from multiprocessing.connection import wait + wait([self._sentinel], timeout=timeout) + + pid = ident + +# +# Create object representing the main process +# + +class _MainProcess(BaseProcess): + + def __init__(self): + self._identity = () + self._name = 'MainProcess' + self._parent_pid = None + self._popen = None + self._closed = False + self._config = {'authkey': AuthenticationString(os.urandom(32)), + 'semprefix': '/mp'} + # Note that some versions of FreeBSD only allow named + # semaphores to have names of up to 14 characters. Therefore + # we choose a short prefix. + # + # On MacOSX in a sandbox it may be necessary to use a + # different prefix -- see #19478. + # + # Everything in self._config will be inherited by descendant + # processes. 
+ + def close(self): + pass + + +_parent_process = None +_current_process = _MainProcess() +_process_counter = itertools.count(1) +_children = set() +del _MainProcess + +# +# Give names to some return codes +# + +_exitcode_to_name = {} + +for name, signum in list(signal.__dict__.items()): + if name[:3]=='SIG' and '_' not in name: + _exitcode_to_name[-signum] = f'-{name}' + +# For debug and leak testing +_dangling = WeakSet() diff --git a/lib/python3.10/site-packages/multiprocess/queues.py b/lib/python3.10/site-packages/multiprocess/queues.py new file mode 100644 index 0000000000000000000000000000000000000000..cade46d9cba37e0a593da49e8d47e4e62425beb6 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/queues.py @@ -0,0 +1,383 @@ +# +# Module implementing queues +# +# multiprocessing/queues.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] + +import sys +import os +import threading +import collections +import time +import types +import weakref +import errno + +from queue import Empty, Full + +try: + import _multiprocess as _multiprocessing +except ImportError: + import _multiprocessing + +from . import connection +from . import context +_ForkingPickler = context.reduction.ForkingPickler + +from .util import debug, info, Finalize, register_after_fork, is_exiting + +# +# Queue type using a pipe, buffer and thread +# + +class Queue(object): + + def __init__(self, maxsize=0, *, ctx): + if maxsize <= 0: + # Can raise ImportError (see issues #3770 and #23400) + from .synchronize import SEM_VALUE_MAX as maxsize + self._maxsize = maxsize + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._opid = os.getpid() + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + self._sem = ctx.BoundedSemaphore(maxsize) + # For use by concurrent.futures + self._ignore_epipe = False + self._reset() + + if sys.platform != 'win32': + register_after_fork(self, Queue._after_fork) + + def __getstate__(self): + context.assert_spawning(self) + return (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) + + def __setstate__(self, state): + (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) = state + self._reset() + + def _after_fork(self): + debug('Queue._after_fork()') + self._reset(after_fork=True) + + def _reset(self, after_fork=False): + if after_fork: + self._notempty._at_fork_reinit() + else: + self._notempty = threading.Condition(threading.Lock()) + self._buffer = collections.deque() + self._thread = None + self._jointhread = None + self._joincancelled = False + self._closed = False + self._close = None + self._send_bytes = self._writer.send_bytes + self._recv_bytes = self._reader.recv_bytes + self._poll = self._reader.poll + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._notempty.notify() + + def get(self, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if block and timeout is None: + with self._rlock: + res = self._recv_bytes() + self._sem.release() + else: + if block: + deadline = getattr(time,'monotonic',time.time)() + timeout + 
if not self._rlock.acquire(block, timeout): + raise Empty + try: + if block: + timeout = deadline - getattr(time,'monotonic',time.time)() + if not self._poll(timeout): + raise Empty + elif not self._poll(): + raise Empty + res = self._recv_bytes() + self._sem.release() + finally: + self._rlock.release() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def qsize(self): + # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() + return self._maxsize - self._sem._semlock._get_value() + + def empty(self): + return not self._poll() + + def full(self): + return self._sem._semlock._is_zero() + + def get_nowait(self): + return self.get(False) + + def put_nowait(self, obj): + return self.put(obj, False) + + def close(self): + self._closed = True + try: + self._reader.close() + finally: + close = self._close + if close: + self._close = None + close() + + def join_thread(self): + debug('Queue.join_thread()') + assert self._closed, "Queue {0!r} not closed".format(self) + if self._jointhread: + self._jointhread() + + def cancel_join_thread(self): + debug('Queue.cancel_join_thread()') + self._joincancelled = True + try: + self._jointhread.cancel() + except AttributeError: + pass + + def _start_thread(self): + debug('Queue._start_thread()') + + # Start thread which transfers data from buffer to pipe + self._buffer.clear() + self._thread = threading.Thread( + target=Queue._feed, + args=(self._buffer, self._notempty, self._send_bytes, + self._wlock, self._writer.close, self._ignore_epipe, + self._on_queue_feeder_error, self._sem), + name='QueueFeederThread' + ) + self._thread.daemon = True + + debug('doing self._thread.start()') + self._thread.start() + debug('... done self._thread.start()') + + if not self._joincancelled: + self._jointhread = Finalize( + self._thread, Queue._finalize_join, + [weakref.ref(self._thread)], + exitpriority=-5 + ) + + # Send sentinel to the thread queue object when garbage collected + self._close = Finalize( + self, Queue._finalize_close, + [self._buffer, self._notempty], + exitpriority=10 + ) + + @staticmethod + def _finalize_join(twr): + debug('joining queue thread') + thread = twr() + if thread is not None: + thread.join() + debug('... queue thread joined') + else: + debug('... 
queue thread already dead')
+
+ @staticmethod
+ def _finalize_close(buffer, notempty):
+ debug('telling queue thread to quit')
+ with notempty:
+ buffer.append(_sentinel)
+ notempty.notify()
+
+ @staticmethod
+ def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe,
+ onerror, queue_sem):
+ debug('starting thread to feed data to pipe')
+ nacquire = notempty.acquire
+ nrelease = notempty.release
+ nwait = notempty.wait
+ bpopleft = buffer.popleft
+ sentinel = _sentinel
+ if sys.platform != 'win32':
+ wacquire = writelock.acquire
+ wrelease = writelock.release
+ else:
+ wacquire = None
+
+ while 1:
+ try:
+ nacquire()
+ try:
+ if not buffer:
+ nwait()
+ finally:
+ nrelease()
+ try:
+ while 1:
+ obj = bpopleft()
+ if obj is sentinel:
+ debug('feeder thread got sentinel -- exiting')
+ close()
+ return
+
+ # serialize the data before acquiring the lock
+ obj = _ForkingPickler.dumps(obj)
+ if wacquire is None:
+ send_bytes(obj)
+ else:
+ wacquire()
+ try:
+ send_bytes(obj)
+ finally:
+ wrelease()
+ except IndexError:
+ pass
+ except Exception as e:
+ if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
+ return
+ # Since this runs in a daemon thread the resources it uses
+ # may become unusable while the process is cleaning up.
+ # We ignore errors which happen after the process has
+ # started to clean up.
+ if is_exiting():
+ info('error in queue thread: %s', e)
+ return
+ else:
+ # Since the object has not been sent in the queue, we need
+ # to decrease the size of the queue. The error acts as
+ # if the object had been silently removed from the queue
+ # and this step is necessary to have a properly working
+ # queue.
+ queue_sem.release()
+ onerror(e, obj)
+
+ @staticmethod
+ def _on_queue_feeder_error(e, obj):
+ """
+ Private API hook called when feeding data in the background thread
+ raises an exception. For overriding by concurrent.futures.
+ """
+ import traceback
+ traceback.print_exc()
+
+
+_sentinel = object()
+
+#
+# A queue type which also supports join() and task_done() methods
+#
+# Note that if you do not call task_done() for each finished task then
+# eventually the counter's semaphore may overflow causing Bad Things
+# to happen.
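+#
+# A minimal sketch of that protocol (illustrative only; the helper names are
+# examples and the worker function is assumed to be importable at module
+# level):
+#
+#     import multiprocess as mp
+#
+#     def worker(q):
+#         while True:
+#             item = q.get()
+#             try:
+#                 process(item)      # hypothetical per-item work
+#             finally:
+#                 q.task_done()      # exactly one task_done() per get()
+#
+#     if __name__ == '__main__':
+#         q = mp.JoinableQueue()
+#         mp.Process(target=worker, args=(q,), daemon=True).start()
+#         for item in range(10):
+#             q.put(item)
+#         q.join()                   # blocks until every item is task_done()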
+# + +class JoinableQueue(Queue): + + def __init__(self, maxsize=0, *, ctx): + Queue.__init__(self, maxsize, ctx=ctx) + self._unfinished_tasks = ctx.Semaphore(0) + self._cond = ctx.Condition() + + def __getstate__(self): + return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) + + def __setstate__(self, state): + Queue.__setstate__(self, state[:-2]) + self._cond, self._unfinished_tasks = state[-2:] + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty, self._cond: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._unfinished_tasks.release() + self._notempty.notify() + + def task_done(self): + with self._cond: + if not self._unfinished_tasks.acquire(False): + raise ValueError('task_done() called too many times') + if self._unfinished_tasks._semlock._is_zero(): + self._cond.notify_all() + + def join(self): + with self._cond: + if not self._unfinished_tasks._semlock._is_zero(): + self._cond.wait() + +# +# Simplified Queue type -- really just a locked pipe +# + +class SimpleQueue(object): + + def __init__(self, *, ctx): + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._poll = self._reader.poll + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + + def close(self): + self._reader.close() + self._writer.close() + + def empty(self): + return not self._poll() + + def __getstate__(self): + context.assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock) = state + self._poll = self._reader.poll + + def get(self): + with self._rlock: + res = self._reader.recv_bytes() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def put(self, obj): + # serialize the data before acquiring the lock + obj = _ForkingPickler.dumps(obj) + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self._writer.send_bytes(obj) + else: + with self._wlock: + self._writer.send_bytes(obj) + + __class_getitem__ = classmethod(types.GenericAlias) diff --git a/lib/python3.10/site-packages/multiprocess/resource_sharer.py b/lib/python3.10/site-packages/multiprocess/resource_sharer.py new file mode 100644 index 0000000000000000000000000000000000000000..66076509a1202e7a1b4d8a481f64621a4bfbbf3e --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/resource_sharer.py @@ -0,0 +1,154 @@ +# +# We use a background thread for sharing fds on Unix, and for sharing sockets on +# Windows. +# +# A client which wants to pickle a resource registers it with the resource +# sharer and gets an identifier in return. The unpickling process will connect +# to the resource sharer, sends the identifier and its pid, and then receives +# the resource. +# + +import os +import signal +import socket +import sys +import threading + +from . import process +from .context import reduction +from . import util + +__all__ = ['stop'] + + +if sys.platform == 'win32': + __all__ += ['DupSocket'] + + class DupSocket(object): + '''Picklable wrapper for a socket.''' + def __init__(self, sock): + new_sock = sock.dup() + def send(conn, pid): + share = new_sock.share(pid) + conn.send_bytes(share) + self._id = _resource_sharer.register(send, new_sock.close) + + def detach(self): + '''Get the socket. 
This should only be called once.''' + with _resource_sharer.get_connection(self._id) as conn: + share = conn.recv_bytes() + return socket.fromshare(share) + +else: + __all__ += ['DupFd'] + + class DupFd(object): + '''Wrapper for fd which can be used at any time.''' + def __init__(self, fd): + new_fd = os.dup(fd) + def send(conn, pid): + reduction.send_handle(conn, new_fd, pid) + def close(): + os.close(new_fd) + self._id = _resource_sharer.register(send, close) + + def detach(self): + '''Get the fd. This should only be called once.''' + with _resource_sharer.get_connection(self._id) as conn: + return reduction.recv_handle(conn) + + +class _ResourceSharer(object): + '''Manager for resources using background thread.''' + def __init__(self): + self._key = 0 + self._cache = {} + self._lock = threading.Lock() + self._listener = None + self._address = None + self._thread = None + util.register_after_fork(self, _ResourceSharer._afterfork) + + def register(self, send, close): + '''Register resource, returning an identifier.''' + with self._lock: + if self._address is None: + self._start() + self._key += 1 + self._cache[self._key] = (send, close) + return (self._address, self._key) + + @staticmethod + def get_connection(ident): + '''Return connection from which to receive identified resource.''' + from .connection import Client + address, key = ident + c = Client(address, authkey=process.current_process().authkey) + c.send((key, os.getpid())) + return c + + def stop(self, timeout=None): + '''Stop the background thread and clear registered resources.''' + from .connection import Client + with self._lock: + if self._address is not None: + c = Client(self._address, + authkey=process.current_process().authkey) + c.send(None) + c.close() + self._thread.join(timeout) + if self._thread.is_alive(): + util.sub_warning('_ResourceSharer thread did ' + 'not stop when asked') + self._listener.close() + self._thread = None + self._address = None + self._listener = None + for key, (send, close) in self._cache.items(): + close() + self._cache.clear() + + def _afterfork(self): + for key, (send, close) in self._cache.items(): + close() + self._cache.clear() + self._lock._at_fork_reinit() + if self._listener is not None: + self._listener.close() + self._listener = None + self._address = None + self._thread = None + + def _start(self): + from .connection import Listener + assert self._listener is None, "Already have Listener" + util.debug('starting listener and thread for sending handles') + self._listener = Listener(authkey=process.current_process().authkey) + self._address = self._listener.address + t = threading.Thread(target=self._serve) + t.daemon = True + t.start() + self._thread = t + + def _serve(self): + if hasattr(signal, 'pthread_sigmask'): + signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) + while 1: + try: + with self._listener.accept() as conn: + msg = conn.recv() + if msg is None: + break + key, destination_pid = msg + send, close = self._cache.pop(key) + try: + send(conn, destination_pid) + finally: + close() + except: + if not util.is_exiting(): + sys.excepthook(*sys.exc_info()) + + +_resource_sharer = _ResourceSharer() +stop = _resource_sharer.stop diff --git a/lib/python3.10/site-packages/multiprocess/resource_tracker.py b/lib/python3.10/site-packages/multiprocess/resource_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..aae28a9080029b3bdc6dc5315d9343fd43a4f0f6 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/resource_tracker.py @@ -0,0 
+1,234 @@ +############################################################################### +# Server process to keep track of unlinked resources (like shared memory +# segments, semaphores etc.) and clean them. +# +# On Unix we run a server process which keeps track of unlinked +# resources. The server ignores SIGINT and SIGTERM and reads from a +# pipe. Every other process of the program has a copy of the writable +# end of the pipe, so we get EOF when all other processes have exited. +# Then the server process unlinks any remaining resource names. +# +# This is important because there may be system limits for such resources: for +# instance, the system only supports a limited number of named semaphores, and +# shared-memory segments live in the RAM. If a python process leaks such a +# resource, this resource will not be removed till the next reboot. Without +# this resource tracker process, "killall python" would probably leave unlinked +# resources. + +import os +import signal +import sys +import threading +import warnings + +from . import spawn +from . import util + +__all__ = ['ensure_running', 'register', 'unregister'] + +_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') +_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) + +_CLEANUP_FUNCS = { + 'noop': lambda: None, +} + +if os.name == 'posix': + try: + import _multiprocess as _multiprocessing + except ImportError: + import _multiprocessing + import _posixshmem + + _CLEANUP_FUNCS.update({ + 'semaphore': _multiprocessing.sem_unlink, + 'shared_memory': _posixshmem.shm_unlink, + }) + + +class ResourceTracker(object): + + def __init__(self): + self._lock = threading.Lock() + self._fd = None + self._pid = None + + def _stop(self): + with self._lock: + if self._fd is None: + # not running + return + + # closing the "alive" file descriptor stops main() + os.close(self._fd) + self._fd = None + + os.waitpid(self._pid, 0) + self._pid = None + + def getfd(self): + self.ensure_running() + return self._fd + + def ensure_running(self): + '''Make sure that resource tracker process is running. + + This can be run from any process. Usually a child process will use + the resource created by its parent.''' + with self._lock: + if self._fd is not None: + # resource tracker was launched before, is it still running? + if self._check_alive(): + # => still alive + return + # => dead, launch it again + os.close(self._fd) + + # Clean-up to avoid dangling processes. + try: + # _pid can be None if this process is a child from another + # python process, which has started the resource_tracker. + if self._pid is not None: + os.waitpid(self._pid, 0) + except ChildProcessError: + # The resource_tracker has already been terminated. + pass + self._fd = None + self._pid = None + + warnings.warn('resource_tracker: process died unexpectedly, ' + 'relaunching. Some resources might leak.') + + fds_to_pass = [] + try: + fds_to_pass.append(sys.stderr.fileno()) + except Exception: + pass + cmd = 'from multiprocess.resource_tracker import main;main(%d)' + r, w = os.pipe() + try: + fds_to_pass.append(r) + # process will out live us, so no need to wait on pid + exe = spawn.get_executable() + args = [exe] + util._args_from_interpreter_flags() + args += ['-c', cmd % r] + # bpo-33613: Register a signal mask that will block the signals. + # This signal mask will be inherited by the child that is going + # to be spawned and will protect the child from a race condition + # that can make the child die before it registers signal handlers + # for SIGINT and SIGTERM. 
The mask is unregistered after spawning + # the child. + try: + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) + pid = util.spawnv_passfds(exe, args, fds_to_pass) + finally: + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + except: + os.close(w) + raise + else: + self._fd = w + self._pid = pid + finally: + os.close(r) + + def _check_alive(self): + '''Check that the pipe has not been closed by sending a probe.''' + try: + # We cannot use send here as it calls ensure_running, creating + # a cycle. + os.write(self._fd, b'PROBE:0:noop\n') + except OSError: + return False + else: + return True + + def register(self, name, rtype): + '''Register name of resource with resource tracker.''' + self._send('REGISTER', name, rtype) + + def unregister(self, name, rtype): + '''Unregister name of resource with resource tracker.''' + self._send('UNREGISTER', name, rtype) + + def _send(self, cmd, name, rtype): + self.ensure_running() + msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') + if len(name) > 512: + # posix guarantees that writes to a pipe of less than PIPE_BUF + # bytes are atomic, and that PIPE_BUF >= 512 + raise ValueError('name too long') + nbytes = os.write(self._fd, msg) + assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( + nbytes, len(msg)) + + +_resource_tracker = ResourceTracker() +ensure_running = _resource_tracker.ensure_running +register = _resource_tracker.register +unregister = _resource_tracker.unregister +getfd = _resource_tracker.getfd + +def main(fd): + '''Run resource tracker.''' + # protect the process from ^C and "killall python" etc + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + + for f in (sys.stdin, sys.stdout): + try: + f.close() + except Exception: + pass + + cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} + try: + # keep track of registered/unregistered resources + with open(fd, 'rb') as f: + for line in f: + try: + cmd, name, rtype = line.strip().decode('ascii').split(':') + cleanup_func = _CLEANUP_FUNCS.get(rtype, None) + if cleanup_func is None: + raise ValueError( + f'Cannot register {name} for automatic cleanup: ' + f'unknown resource type {rtype}') + + if cmd == 'REGISTER': + cache[rtype].add(name) + elif cmd == 'UNREGISTER': + cache[rtype].remove(name) + elif cmd == 'PROBE': + pass + else: + raise RuntimeError('unrecognized command %r' % cmd) + except Exception: + try: + sys.excepthook(*sys.exc_info()) + except: + pass + finally: + # all processes have terminated; cleanup any remaining resources + for rtype, rtype_cache in cache.items(): + if rtype_cache: + try: + warnings.warn('resource_tracker: There appear to be %d ' + 'leaked %s objects to clean up at shutdown' % + (len(rtype_cache), rtype)) + except Exception: + pass + for name in rtype_cache: + # For some reason the process which created and registered this + # resource has failed to unregister it. Presumably it has + # died. We therefore unlink it. 
+ try: + try: + _CLEANUP_FUNCS[rtype](name) + except Exception as e: + warnings.warn('resource_tracker: %r: %s' % (name, e)) + finally: + pass diff --git a/lib/python3.10/site-packages/multiprocess/shared_memory.py b/lib/python3.10/site-packages/multiprocess/shared_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..9e8ec93c82131fa407cdd5fdc418f882705b2dae --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/shared_memory.py @@ -0,0 +1,535 @@ +"""Provides shared memory for direct access across processes. + +The API of this package is currently provisional. Refer to the +documentation for details. +""" + + +__all__ = [ 'SharedMemory', 'ShareableList' ] + + +from functools import partial +import mmap +import os +import errno +import struct +import secrets +import types + +if os.name == "nt": + import _winapi + _USE_POSIX = False +else: + import _posixshmem + _USE_POSIX = True + + +_O_CREX = os.O_CREAT | os.O_EXCL + +# FreeBSD (and perhaps other BSDs) limit names to 14 characters. +_SHM_SAFE_NAME_LENGTH = 14 + +# Shared memory block name prefix +if _USE_POSIX: + _SHM_NAME_PREFIX = '/psm_' +else: + _SHM_NAME_PREFIX = 'wnsm_' + + +def _make_filename(): + "Create a random filename for the shared memory object." + # number of random bytes to use for name + nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 + assert nbytes >= 2, '_SHM_NAME_PREFIX too long' + name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) + assert len(name) <= _SHM_SAFE_NAME_LENGTH + return name + + +class SharedMemory: + """Creates a new shared memory block or attaches to an existing + shared memory block. + + Every shared memory block is assigned a unique name. This enables + one process to create a shared memory block with a particular name + so that a different process can attach to that same shared memory + block using that same name. + + As a resource for sharing data across processes, shared memory blocks + may outlive the original process that created them. When one process + no longer needs access to a shared memory block that might still be + needed by other processes, the close() method should be called. + When a shared memory block is no longer needed by any process, the + unlink() method should be called to ensure proper cleanup.""" + + # Defaults; enables close() and unlink() to run without errors. 
+ _name = None + _fd = -1 + _mmap = None + _buf = None + _flags = os.O_RDWR + _mode = 0o600 + _prepend_leading_slash = True if _USE_POSIX else False + + def __init__(self, name=None, create=False, size=0): + if not size >= 0: + raise ValueError("'size' must be a positive integer") + if create: + self._flags = _O_CREX | os.O_RDWR + if size == 0: + raise ValueError("'size' must be a positive number different from zero") + if name is None and not self._flags & os.O_EXCL: + raise ValueError("'name' can only be None if create=True") + + if _USE_POSIX: + + # POSIX Shared Memory + + if name is None: + while True: + name = _make_filename() + try: + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + except FileExistsError: + continue + self._name = name + break + else: + name = "/" + name if self._prepend_leading_slash else name + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + self._name = name + try: + if create and size: + os.ftruncate(self._fd, size) + stats = os.fstat(self._fd) + size = stats.st_size + self._mmap = mmap.mmap(self._fd, size) + except OSError: + self.unlink() + raise + + from .resource_tracker import register + register(self._name, "shared_memory") + + else: + + # Windows Named Shared Memory + + if create: + while True: + temp_name = _make_filename() if name is None else name + # Create and reserve shared memory block with this name + # until it can be attached to by mmap. + h_map = _winapi.CreateFileMapping( + _winapi.INVALID_HANDLE_VALUE, + _winapi.NULL, + _winapi.PAGE_READWRITE, + (size >> 32) & 0xFFFFFFFF, + size & 0xFFFFFFFF, + temp_name + ) + try: + last_error_code = _winapi.GetLastError() + if last_error_code == _winapi.ERROR_ALREADY_EXISTS: + if name is not None: + raise FileExistsError( + errno.EEXIST, + os.strerror(errno.EEXIST), + name, + _winapi.ERROR_ALREADY_EXISTS + ) + else: + continue + self._mmap = mmap.mmap(-1, size, tagname=temp_name) + finally: + _winapi.CloseHandle(h_map) + self._name = temp_name + break + + else: + self._name = name + # Dynamically determine the existing named shared memory + # block's size which is likely a multiple of mmap.PAGESIZE. + h_map = _winapi.OpenFileMapping( + _winapi.FILE_MAP_READ, + False, + name + ) + try: + p_buf = _winapi.MapViewOfFile( + h_map, + _winapi.FILE_MAP_READ, + 0, + 0, + 0 + ) + finally: + _winapi.CloseHandle(h_map) + size = _winapi.VirtualQuerySize(p_buf) + self._mmap = mmap.mmap(-1, size, tagname=name) + + self._size = size + self._buf = memoryview(self._mmap) + + def __del__(self): + try: + self.close() + except OSError: + pass + + def __reduce__(self): + return ( + self.__class__, + ( + self.name, + False, + self.size, + ), + ) + + def __repr__(self): + return f'{self.__class__.__name__}({self.name!r}, size={self.size})' + + @property + def buf(self): + "A memoryview of contents of the shared memory block." + return self._buf + + @property + def name(self): + "Unique name that identifies the shared memory block." + reported_name = self._name + if _USE_POSIX and self._prepend_leading_slash: + if self._name.startswith("/"): + reported_name = self._name[1:] + return reported_name + + @property + def size(self): + "Size in bytes." 
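    # -----------------------------------------------------------------------
    # Illustrative note (not part of this module): __reduce__ above pickles
    # only (name, create=False, size), so unpickling re-attaches to the same
    # block by name instead of copying its contents, e.g.
    #
    #   import pickle
    #   shm = SharedMemory(create=True, size=16)
    #   shm.buf[:2] = b'ok'
    #   clone = pickle.loads(pickle.dumps(shm))    # attaches, no data copy
    #   assert bytes(clone.buf[:2]) == b'ok'
    #   clone.close(); shm.close(); shm.unlink()
    # -----------------------------------------------------------------------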
+ return self._size + + def close(self): + """Closes access to the shared memory from this instance but does + not destroy the shared memory block.""" + if self._buf is not None: + self._buf.release() + self._buf = None + if self._mmap is not None: + self._mmap.close() + self._mmap = None + if _USE_POSIX and self._fd >= 0: + os.close(self._fd) + self._fd = -1 + + def unlink(self): + """Requests that the underlying shared memory block be destroyed. + + In order to ensure proper cleanup of resources, unlink should be + called once (and only once) across all processes which have access + to the shared memory block.""" + if _USE_POSIX and self._name: + from .resource_tracker import unregister + _posixshmem.shm_unlink(self._name) + unregister(self._name, "shared_memory") + + +_encoding = "utf8" + +class ShareableList: + """Pattern for a mutable list-like object shareable via a shared + memory block. It differs from the built-in list type in that these + lists can not change their overall length (i.e. no append, insert, + etc.) + + Because values are packed into a memoryview as bytes, the struct + packing format for any storable value must require no more than 8 + characters to describe its format.""" + + # The shared memory area is organized as follows: + # - 8 bytes: number of items (N) as a 64-bit integer + # - (N + 1) * 8 bytes: offsets of each element from the start of the + # data area + # - K bytes: the data area storing item values (with encoding and size + # depending on their respective types) + # - N * 8 bytes: `struct` format string for each element + # - N bytes: index into _back_transforms_mapping for each element + # (for reconstructing the corresponding Python value) + _types_mapping = { + int: "q", + float: "d", + bool: "xxxxxxx?", + str: "%ds", + bytes: "%ds", + None.__class__: "xxxxxx?x", + } + _alignment = 8 + _back_transforms_mapping = { + 0: lambda value: value, # int, float, bool + 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str + 2: lambda value: value.rstrip(b'\x00'), # bytes + 3: lambda _value: None, # None + } + + @staticmethod + def _extract_recreation_code(value): + """Used in concert with _back_transforms_mapping to convert values + into the appropriate Python objects when retrieving them from + the list as well as when storing them.""" + if not isinstance(value, (str, bytes, None.__class__)): + return 0 + elif isinstance(value, str): + return 1 + elif isinstance(value, bytes): + return 2 + else: + return 3 # NoneType + + def __init__(self, sequence=None, *, name=None): + if name is None or sequence is not None: + sequence = sequence or () + _formats = [ + self._types_mapping[type(item)] + if not isinstance(item, (str, bytes)) + else self._types_mapping[type(item)] % ( + self._alignment * (len(item) // self._alignment + 1), + ) + for item in sequence + ] + self._list_len = len(_formats) + assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len + offset = 0 + # The offsets of each list element into the shared memory's + # data area (0 meaning the start of the data area, not the start + # of the shared memory area). 
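        # -------------------------------------------------------------------
        # Worked example (illustrative only) of the layout built below for
        # the sequence ['hi', 42, None], using _types_mapping and the 8-byte
        # _alignment defined above:
        #
        #   'hi'  (str, len 2) -> format '8s'        (padded to one 8-byte slot)
        #   42    (int)        -> format 'q'         (8 bytes)
        #   None  (NoneType)   -> format 'xxxxxx?x'  (8 bytes, padding + flag)
        #
        #   _allocated_offsets = [0, 8, 16, 24]      (N + 1 = 4 entries)
        #   requested_size     = 8 (list length) + 4*8 (offsets) + 24 (data)
        #                        + 3*8 (packing formats) + 3 (codes) = 91 bytes
        # -------------------------------------------------------------------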
+ self._allocated_offsets = [0] + for fmt in _formats: + offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) + self._allocated_offsets.append(offset) + _recreation_codes = [ + self._extract_recreation_code(item) for item in sequence + ] + _recreation_codes = [ + self._extract_recreation_code(item) for item in sequence + ] + requested_size = struct.calcsize( + "q" + self._format_size_metainfo + + "".join(_formats) + + self._format_packing_metainfo + + self._format_back_transform_codes + ) + + self.shm = SharedMemory(name, create=True, size=requested_size) + else: + self.shm = SharedMemory(name) + + if sequence is not None: + _enc = _encoding + struct.pack_into( + "q" + self._format_size_metainfo, + self.shm.buf, + 0, + self._list_len, + *(self._allocated_offsets) + ) + struct.pack_into( + "".join(_formats), + self.shm.buf, + self._offset_data_start, + *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) + ) + struct.pack_into( + self._format_packing_metainfo, + self.shm.buf, + self._offset_packing_formats, + *(v.encode(_enc) for v in _formats) + ) + struct.pack_into( + self._format_back_transform_codes, + self.shm.buf, + self._offset_back_transform_codes, + *(_recreation_codes) + ) + + else: + self._list_len = len(self) # Obtains size from offset 0 in buffer. + self._allocated_offsets = list( + struct.unpack_from( + self._format_size_metainfo, + self.shm.buf, + 1 * 8 + ) + ) + + def _get_packing_format(self, position): + "Gets the packing format for a single value stored in the list." + position = position if position >= 0 else position + self._list_len + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + v = struct.unpack_from( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8 + )[0] + fmt = v.rstrip(b'\x00') + fmt_as_str = fmt.decode(_encoding) + + return fmt_as_str + + def _get_back_transform(self, position): + "Gets the back transformation function for a single value." 
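        # -------------------------------------------------------------------
        # Illustrative note (not part of this module): the stored codes index
        # _back_transforms_mapping above; e.g. a str item is kept NUL-padded
        # in its slot and recovered via code 1:
        #
        #   raw = b'hi\x00\x00\x00\x00\x00\x00'      # value unpacked as '8s'
        #   assert raw.rstrip(b'\x00').decode('utf8') == 'hi'
        #
        # Codes: 0 -> int/float/bool (identity), 1 -> str, 2 -> bytes, 3 -> None.
        # -------------------------------------------------------------------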
+ + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + transform_code = struct.unpack_from( + "b", + self.shm.buf, + self._offset_back_transform_codes + position + )[0] + transform_function = self._back_transforms_mapping[transform_code] + + return transform_function + + def _set_packing_format_and_transform(self, position, fmt_as_str, value): + """Sets the packing format and back transformation code for a + single value in the list at the specified position.""" + + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + struct.pack_into( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8, + fmt_as_str.encode(_encoding) + ) + + transform_code = self._extract_recreation_code(value) + struct.pack_into( + "b", + self.shm.buf, + self._offset_back_transform_codes + position, + transform_code + ) + + def __getitem__(self, position): + position = position if position >= 0 else position + self._list_len + try: + offset = self._offset_data_start + self._allocated_offsets[position] + (v,) = struct.unpack_from( + self._get_packing_format(position), + self.shm.buf, + offset + ) + except IndexError: + raise IndexError("index out of range") + + back_transform = self._get_back_transform(position) + v = back_transform(v) + + return v + + def __setitem__(self, position, value): + position = position if position >= 0 else position + self._list_len + try: + item_offset = self._allocated_offsets[position] + offset = self._offset_data_start + item_offset + current_format = self._get_packing_format(position) + except IndexError: + raise IndexError("assignment index out of range") + + if not isinstance(value, (str, bytes)): + new_format = self._types_mapping[type(value)] + encoded_value = value + else: + allocated_length = self._allocated_offsets[position + 1] - item_offset + + encoded_value = (value.encode(_encoding) + if isinstance(value, str) else value) + if len(encoded_value) > allocated_length: + raise ValueError("bytes/str item exceeds available storage") + if current_format[-1] == "s": + new_format = current_format + else: + new_format = self._types_mapping[str] % ( + allocated_length, + ) + + self._set_packing_format_and_transform( + position, + new_format, + value + ) + struct.pack_into(new_format, self.shm.buf, offset, encoded_value) + + def __reduce__(self): + return partial(self.__class__, name=self.shm.name), () + + def __len__(self): + return struct.unpack_from("q", self.shm.buf, 0)[0] + + def __repr__(self): + return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' + + @property + def format(self): + "The struct packing format used by all currently stored items." + return "".join( + self._get_packing_format(i) for i in range(self._list_len) + ) + + @property + def _format_size_metainfo(self): + "The struct packing format used for the items' storage offsets." + return "q" * (self._list_len + 1) + + @property + def _format_packing_metainfo(self): + "The struct packing format used for the items' packing formats." + return "8s" * self._list_len + + @property + def _format_back_transform_codes(self): + "The struct packing format used for the items' back transforms." 
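    # -----------------------------------------------------------------------
    # Illustrative usage sketch (not part of this module).  A ShareableList
    # has a fixed length and fixed per-item storage: items can be replaced in
    # place, but nothing can be appended and str/bytes values must fit their
    # originally allocated slot.
    #
    #   from multiprocess.shared_memory import ShareableList
    #
    #   sl = ShareableList(['hi', 42, None])
    #   sl[1] = 43                    # fine: same 8-byte 'q' slot
    #   sl[0] = 'hello'               # fine: fits the 8 bytes allocated for 'hi'
    #   # sl[0] = 'x' * 100           # would raise ValueError (exceeds storage)
    #
    #   same = ShareableList(name=sl.shm.name)    # attach from another process
    #   assert list(same) == ['hello', 43, None]
    #
    #   same.shm.close(); sl.shm.close(); sl.shm.unlink()
    # -----------------------------------------------------------------------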
+ return "b" * self._list_len + + @property + def _offset_data_start(self): + # - 8 bytes for the list length + # - (N + 1) * 8 bytes for the element offsets + return (self._list_len + 2) * 8 + + @property + def _offset_packing_formats(self): + return self._offset_data_start + self._allocated_offsets[-1] + + @property + def _offset_back_transform_codes(self): + return self._offset_packing_formats + self._list_len * 8 + + def count(self, value): + "L.count(value) -> integer -- return number of occurrences of value." + + return sum(value == entry for entry in self) + + def index(self, value): + """L.index(value) -> integer -- return first index of value. + Raises ValueError if the value is not present.""" + + for position, entry in enumerate(self): + if value == entry: + return position + else: + raise ValueError(f"{value!r} not in this container") + + __class_getitem__ = classmethod(types.GenericAlias) diff --git a/lib/python3.10/site-packages/multiprocess/spawn.py b/lib/python3.10/site-packages/multiprocess/spawn.py new file mode 100644 index 0000000000000000000000000000000000000000..2c0283d80c7799a09e83b363e3f956ebbb665e42 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/spawn.py @@ -0,0 +1,297 @@ +# +# Code used to start processes when using the spawn or forkserver +# start methods. +# +# multiprocessing/spawn.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import os +import sys +import runpy +import types + +from . import get_start_method, set_start_method +from . import process +from .context import reduction +from . import util + +__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', + 'get_preparation_data', 'get_command_line', 'import_main_path'] + +# +# _python_exe is the assumed path to the python executable. +# People embedding Python want to modify it. 
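# (Illustrative aside, not part of spawn.py.)  An application embedding Python
# can point the spawn machinery at its own interpreter through the helpers
# defined just below; the path used here is a made-up example.
#
#   from multiprocess import spawn
#   spawn.set_executable('/opt/myapp/bin/python3')
#   assert spawn.get_executable() == '/opt/myapp/bin/python3'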
+# + +if sys.platform != 'win32': + WINEXE = False + WINSERVICE = False +else: + WINEXE = getattr(sys, 'frozen', False) + WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + +if WINSERVICE: + _python_exe = os.path.join(sys.exec_prefix, 'python.exe') +else: + _python_exe = sys.executable + +def set_executable(exe): + global _python_exe + _python_exe = exe + +def get_executable(): + return _python_exe + +# +# +# + +def is_forking(argv): + ''' + Return whether commandline indicates we are forking + ''' + if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': + return True + else: + return False + + +def freeze_support(): + ''' + Run code for process object if this in not the main process + ''' + if is_forking(sys.argv): + kwds = {} + for arg in sys.argv[2:]: + name, value = arg.split('=') + if value == 'None': + kwds[name] = None + else: + kwds[name] = int(value) + spawn_main(**kwds) + sys.exit() + + +def get_command_line(**kwds): + ''' + Returns prefix of command line used for spawning a child process + ''' + if getattr(sys, 'frozen', False): + return ([sys.executable, '--multiprocessing-fork'] + + ['%s=%r' % item for item in kwds.items()]) + else: + prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' + prog %= ', '.join('%s=%r' % item for item in kwds.items()) + opts = util._args_from_interpreter_flags() + return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] + + +def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): + ''' + Run code specified by data received over pipe + ''' + assert is_forking(sys.argv), "Not forking" + if sys.platform == 'win32': + import msvcrt + import _winapi + + if parent_pid is not None: + source_process = _winapi.OpenProcess( + _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, + False, parent_pid) + else: + source_process = None + new_handle = reduction.duplicate(pipe_handle, + source_process=source_process) + fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) + parent_sentinel = source_process + else: + from . import resource_tracker + resource_tracker._resource_tracker._fd = tracker_fd + fd = pipe_handle + parent_sentinel = os.dup(pipe_handle) + exitcode = _main(fd, parent_sentinel) + sys.exit(exitcode) + + +def _main(fd, parent_sentinel): + with os.fdopen(fd, 'rb', closefd=True) as from_parent: + process.current_process()._inheriting = True + try: + preparation_data = reduction.pickle.load(from_parent) + prepare(preparation_data) + self = reduction.pickle.load(from_parent) + finally: + del process.current_process()._inheriting + return self._bootstrap(parent_sentinel) + + +def _check_not_importing_main(): + if getattr(process.current_process(), '_inheriting', False): + raise RuntimeError(''' + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... 
+ + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable.''') + + +def get_preparation_data(name): + ''' + Return info about parent needed by child to unpickle process object + ''' + _check_not_importing_main() + d = dict( + log_to_stderr=util._log_to_stderr, + authkey=process.current_process().authkey, + ) + + if util._logger is not None: + d['log_level'] = util._logger.getEffectiveLevel() + + sys_path=sys.path.copy() + try: + i = sys_path.index('') + except ValueError: + pass + else: + sys_path[i] = process.ORIGINAL_DIR + + d.update( + name=name, + sys_path=sys_path, + sys_argv=sys.argv, + orig_dir=process.ORIGINAL_DIR, + dir=os.getcwd(), + start_method=get_start_method(), + ) + + # Figure out whether to initialise main in the subprocess as a module + # or through direct execution (or to leave it alone entirely) + main_module = sys.modules['__main__'] + main_mod_name = getattr(main_module.__spec__, "name", None) + if main_mod_name is not None: + d['init_main_from_name'] = main_mod_name + elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): + main_path = getattr(main_module, '__file__', None) + if main_path is not None: + if (not os.path.isabs(main_path) and + process.ORIGINAL_DIR is not None): + main_path = os.path.join(process.ORIGINAL_DIR, main_path) + d['init_main_from_path'] = os.path.normpath(main_path) + + return d + +# +# Prepare current process +# + +old_main_modules = [] + +def prepare(data): + ''' + Try to get current process ready to unpickle process object + ''' + if 'name' in data: + process.current_process().name = data['name'] + + if 'authkey' in data: + process.current_process().authkey = data['authkey'] + + if 'log_to_stderr' in data and data['log_to_stderr']: + util.log_to_stderr() + + if 'log_level' in data: + util.get_logger().setLevel(data['log_level']) + + if 'sys_path' in data: + sys.path = data['sys_path'] + + if 'sys_argv' in data: + sys.argv = data['sys_argv'] + + if 'dir' in data: + os.chdir(data['dir']) + + if 'orig_dir' in data: + process.ORIGINAL_DIR = data['orig_dir'] + + if 'start_method' in data: + set_start_method(data['start_method'], force=True) + + if 'init_main_from_name' in data: + _fixup_main_from_name(data['init_main_from_name']) + elif 'init_main_from_path' in data: + _fixup_main_from_path(data['init_main_from_path']) + +# Multiprocessing module helpers to fix up the main module in +# spawned subprocesses +def _fixup_main_from_name(mod_name): + # __main__.py files for packages, directories, zip archives, etc, run + # their "main only" code unconditionally, so we don't even try to + # populate anything in __main__, nor do we make any changes to + # __main__ attributes + current_main = sys.modules['__main__'] + if mod_name == "__main__" or mod_name.endswith(".__main__"): + return + + # If this process was forked, __main__ may already be populated + if getattr(current_main.__spec__, "name", None) == mod_name: + return + + # Otherwise, __main__ may contain some non-main code where we need to + # support unpickling it properly. 
We rerun it as __mp_main__ and make + # the normal __main__ an alias to that + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_module(mod_name, + run_name="__mp_main__", + alter_sys=True) + main_module.__dict__.update(main_content) + sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module + + +def _fixup_main_from_path(main_path): + # If this process was forked, __main__ may already be populated + current_main = sys.modules['__main__'] + + # Unfortunately, the main ipython launch script historically had no + # "if __name__ == '__main__'" guard, so we work around that + # by treating it like a __main__.py file + # See https://github.com/ipython/ipython/issues/4698 + main_name = os.path.splitext(os.path.basename(main_path))[0] + if main_name == 'ipython': + return + + # Otherwise, if __file__ already has the setting we expect, + # there's nothing more to do + if getattr(current_main, '__file__', None) == main_path: + return + + # If the parent process has sent a path through rather than a module + # name we assume it is an executable script that may contain + # non-main code that needs to be executed + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_path(main_path, + run_name="__mp_main__") + main_module.__dict__.update(main_content) + sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module + + +def import_main_path(main_path): + ''' + Set sys.modules['__main__'] to module at main_path + ''' + _fixup_main_from_path(main_path) diff --git a/lib/python3.10/site-packages/multiprocess/tests/__init__.py b/lib/python3.10/site-packages/multiprocess/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f7fb722fcc1162385978ef79710fcda0248259a6 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/tests/__init__.py @@ -0,0 +1,5792 @@ +# +# Unit tests for the multiprocessing package +# + +import unittest +import unittest.mock +import queue as pyqueue +import time +import io +import itertools +import sys +import os +import gc +import errno +import signal +import array +import socket +import random +import logging +import subprocess +import struct +import operator +import pickle #XXX: use dill? +import weakref +import warnings +import test.support +import test.support.script_helper +from test import support +from test.support import hashlib_helper +from test.support import socket_helper + + +# Skip tests if _multiprocessing wasn't built. +_multiprocessing = test.support.import_module('_multiprocessing') +# Skip tests if sem_open implementation is broken. 
+test.support.import_module('multiprocess.synchronize') +import threading + +import multiprocess as multiprocessing +import multiprocess.connection +import multiprocess.dummy +import multiprocess.heap +import multiprocess.managers +import multiprocess.pool +import multiprocess.queues + +from multiprocess import util + +try: + from multiprocess import reduction + HAS_REDUCTION = reduction.HAVE_SEND_HANDLE +except ImportError: + HAS_REDUCTION = False + +try: + from multiprocess.sharedctypes import Value, copy + HAS_SHAREDCTYPES = True +except ImportError: + HAS_SHAREDCTYPES = False + +try: + from multiprocess import shared_memory + HAS_SHMEM = True +except ImportError: + HAS_SHMEM = False + +try: + import msvcrt +except ImportError: + msvcrt = None + + +def latin(s): + return s.encode('latin') + + +def close_queue(queue): + if isinstance(queue, multiprocessing.queues.Queue): + queue.close() + queue.join_thread() + + +def join_process(process): + # Since multiprocessing.Process has the same API than threading.Thread + # (join() and is_alive(), the support function can be reused + support.join_thread(process) + + +if os.name == "posix": + from multiprocess import resource_tracker + + def _resource_unlink(name, rtype): + resource_tracker._CLEANUP_FUNCS[rtype](name) + + +# +# Constants +# + +LOG_LEVEL = util.SUBWARNING +#LOG_LEVEL = logging.DEBUG + +DELTA = 0.1 +CHECK_TIMINGS = False # making true makes tests take a lot longer + # and can sometimes cause some non-serious + # failures because some calls block a bit + # longer than expected +if CHECK_TIMINGS: + TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 +else: + TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 + +HAVE_GETVALUE = not getattr(_multiprocessing, + 'HAVE_BROKEN_SEM_GETVALUE', False) + +WIN32 = (sys.platform == "win32") + +from multiprocess.connection import wait + +def wait_for_handle(handle, timeout): + if timeout is not None and timeout < 0.0: + timeout = None + return wait([handle], timeout) + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except: + MAXFD = 256 + +# To speed up tests when using the forkserver, we can preload these: +PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] + +# +# Some tests require ctypes +# + +try: + from ctypes import Structure, c_int, c_double, c_longlong +except ImportError: + Structure = object + c_int = c_double = c_longlong = None + + +def check_enough_semaphores(): + """Check that the system supports enough semaphores to run the test.""" + # minimum number of semaphores available according to POSIX + nsems_min = 256 + try: + nsems = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems == -1 or nsems >= nsems_min: + return + raise unittest.SkipTest("The OS doesn't support enough semaphores " + "to run the test (required: %d)." 
% nsems_min) + + +# +# Creates a wrapper for a function which records the time it takes to finish +# + +class TimingWrapper(object): + + def __init__(self, func): + self.func = func + self.elapsed = None + + def __call__(self, *args, **kwds): + t = getattr(time,'monotonic',time.time)() + try: + return self.func(*args, **kwds) + finally: + self.elapsed = getattr(time,'monotonic',time.time)() - t + +# +# Base class for test cases +# + +class BaseTestCase(object): + + ALLOWED_TYPES = ('processes', 'manager', 'threads') + + def assertTimingAlmostEqual(self, a, b): + if CHECK_TIMINGS: + self.assertAlmostEqual(a, b, 1) + + def assertReturnsIfImplemented(self, value, func, *args): + try: + res = func(*args) + except NotImplementedError: + pass + else: + return self.assertEqual(value, res) + + # For the sanity of Windows users, rather than crashing or freezing in + # multiple ways. + def __reduce__(self, *args): + raise NotImplementedError("shouldn't try to pickle a test case") + + __reduce_ex__ = __reduce__ + +# +# Return the value of a semaphore +# + +def get_value(self): + try: + return self.get_value() + except AttributeError: + try: + return self._Semaphore__value + except AttributeError: + try: + return self._value + except AttributeError: + raise NotImplementedError + +# +# Testcases +# + +class DummyCallable: + def __call__(self, q, c): + assert isinstance(c, DummyCallable) + q.put(5) + + +class _TestProcess(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + def test_current(self): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + current = self.current_process() + authkey = current.authkey + + self.assertTrue(current.is_alive()) + self.assertTrue(not current.daemon) + self.assertIsInstance(authkey, bytes) + self.assertTrue(len(authkey) > 0) + self.assertEqual(current.ident, os.getpid()) + self.assertEqual(current.exitcode, None) + + def test_daemon_argument(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # By default uses the current process's daemon flag. + proc0 = self.Process(target=self._test) + self.assertEqual(proc0.daemon, self.current_process().daemon) + proc1 = self.Process(target=self._test, daemon=True) + self.assertTrue(proc1.daemon) + proc2 = self.Process(target=self._test, daemon=False) + self.assertFalse(proc2.daemon) + + @classmethod + def _test(cls, q, *args, **kwds): + current = cls.current_process() + q.put(args) + q.put(kwds) + q.put(current.name) + if cls.TYPE != 'threads': + q.put(bytes(current.authkey)) + q.put(current.pid) + + def test_parent_process_attributes(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + self.assertIsNone(self.parent_process()) + + rconn, wconn = self.Pipe(duplex=False) + p = self.Process(target=self._test_send_parent_process, args=(wconn,)) + p.start() + p.join() + parent_pid, parent_name = rconn.recv() + self.assertEqual(parent_pid, self.current_process().pid) + self.assertEqual(parent_pid, os.getpid()) + self.assertEqual(parent_name, self.current_process().name) + + @classmethod + def _test_send_parent_process(cls, wconn): + from multiprocess.process import parent_process + wconn.send([parent_process().pid, parent_process().name]) + + def _test_parent_process(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # Launch a child process. Make it launch a grandchild process. 
Kill the + # child process and make sure that the grandchild notices the death of + # its parent (a.k.a the child process). + rconn, wconn = self.Pipe(duplex=False) + p = self.Process( + target=self._test_create_grandchild_process, args=(wconn, )) + p.start() + + if not rconn.poll(timeout=support.LONG_TIMEOUT): + raise AssertionError("Could not communicate with child process") + parent_process_status = rconn.recv() + self.assertEqual(parent_process_status, "alive") + + p.terminate() + p.join() + + if not rconn.poll(timeout=support.LONG_TIMEOUT): + raise AssertionError("Could not communicate with child process") + parent_process_status = rconn.recv() + self.assertEqual(parent_process_status, "not alive") + + @classmethod + def _test_create_grandchild_process(cls, wconn): + p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) + p.start() + time.sleep(300) + + @classmethod + def _test_report_parent_status(cls, wconn): + from multiprocess.process import parent_process + wconn.send("alive" if parent_process().is_alive() else "not alive") + parent_process().join(timeout=support.SHORT_TIMEOUT) + wconn.send("alive" if parent_process().is_alive() else "not alive") + + def test_process(self): + q = self.Queue(1) + e = self.Event() + args = (q, 1, 2) + kwargs = {'hello':23, 'bye':2.54} + name = 'SomeProcess' + p = self.Process( + target=self._test, args=args, kwargs=kwargs, name=name + ) + p.daemon = True + current = self.current_process() + + if self.TYPE != 'threads': + self.assertEqual(p.authkey, current.authkey) + self.assertEqual(p.is_alive(), False) + self.assertEqual(p.daemon, True) + self.assertNotIn(p, self.active_children()) + self.assertTrue(type(self.active_children()) is list) + self.assertEqual(p.exitcode, None) + + p.start() + + self.assertEqual(p.exitcode, None) + self.assertEqual(p.is_alive(), True) + self.assertIn(p, self.active_children()) + + self.assertEqual(q.get(), args[1:]) + self.assertEqual(q.get(), kwargs) + self.assertEqual(q.get(), p.name) + if self.TYPE != 'threads': + self.assertEqual(q.get(), current.authkey) + self.assertEqual(q.get(), p.pid) + + p.join() + + self.assertEqual(p.exitcode, 0) + self.assertEqual(p.is_alive(), False) + self.assertNotIn(p, self.active_children()) + close_queue(q) + + @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") + def test_process_mainthread_native_id(self): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + current_mainthread_native_id = threading.main_thread().native_id + + q = self.Queue(1) + p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) + p.start() + + child_mainthread_native_id = q.get() + p.join() + close_queue(q) + + self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) + + @classmethod + def _test_process_mainthread_native_id(cls, q): + mainthread_native_id = threading.main_thread().native_id + q.put(mainthread_native_id) + + @classmethod + def _sleep_some(cls): + time.sleep(100) + + @classmethod + def _test_sleep(cls, delay): + time.sleep(delay) + + def _kill_process(self, meth): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + p = self.Process(target=self._sleep_some) + p.daemon = True + p.start() + + self.assertEqual(p.is_alive(), True) + self.assertIn(p, self.active_children()) + self.assertEqual(p.exitcode, None) + + join = TimingWrapper(p.join) + + self.assertEqual(join(0), None) + self.assertTimingAlmostEqual(join.elapsed, 0.0) + 
self.assertEqual(p.is_alive(), True) + + self.assertEqual(join(-1), None) + self.assertTimingAlmostEqual(join.elapsed, 0.0) + self.assertEqual(p.is_alive(), True) + + # XXX maybe terminating too soon causes the problems on Gentoo... + time.sleep(1) + + meth(p) + + if hasattr(signal, 'alarm'): + # On the Gentoo buildbot waitpid() often seems to block forever. + # We use alarm() to interrupt it if it blocks for too long. + def handler(*args): + raise RuntimeError('join took too long: %s' % p) + old_handler = signal.signal(signal.SIGALRM, handler) + try: + signal.alarm(10) + self.assertEqual(join(), None) + finally: + signal.alarm(0) + signal.signal(signal.SIGALRM, old_handler) + else: + self.assertEqual(join(), None) + + self.assertTimingAlmostEqual(join.elapsed, 0.0) + + self.assertEqual(p.is_alive(), False) + self.assertNotIn(p, self.active_children()) + + p.join() + + return p.exitcode + + def test_terminate(self): + exitcode = self._kill_process(multiprocessing.Process.terminate) + if os.name != 'nt': + self.assertEqual(exitcode, -signal.SIGTERM) + + def test_kill(self): + exitcode = self._kill_process(multiprocessing.Process.kill) + if os.name != 'nt': + self.assertEqual(exitcode, -signal.SIGKILL) + + def test_cpu_count(self): + try: + cpus = multiprocessing.cpu_count() + except NotImplementedError: + cpus = 1 + self.assertTrue(type(cpus) is int) + self.assertTrue(cpus >= 1) + + def test_active_children(self): + self.assertEqual(type(self.active_children()), list) + + p = self.Process(target=time.sleep, args=(DELTA,)) + self.assertNotIn(p, self.active_children()) + + p.daemon = True + p.start() + self.assertIn(p, self.active_children()) + + p.join() + self.assertNotIn(p, self.active_children()) + + @classmethod + def _test_recursion(cls, wconn, id): + wconn.send(id) + if len(id) < 2: + for i in range(2): + p = cls.Process( + target=cls._test_recursion, args=(wconn, id+[i]) + ) + p.start() + p.join() + + @unittest.skipIf(True, "fails with is_dill(obj, child=True)") + def test_recursion(self): + rconn, wconn = self.Pipe(duplex=False) + self._test_recursion(wconn, []) + + time.sleep(DELTA) + result = [] + while rconn.poll(): + result.append(rconn.recv()) + + expected = [ + [], + [0], + [0, 0], + [0, 1], + [1], + [1, 0], + [1, 1] + ] + self.assertEqual(result, expected) + + @classmethod + def _test_sentinel(cls, event): + event.wait(10.0) + + def test_sentinel(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + event = self.Event() + p = self.Process(target=self._test_sentinel, args=(event,)) + with self.assertRaises(ValueError): + p.sentinel + p.start() + self.addCleanup(p.join) + sentinel = p.sentinel + self.assertIsInstance(sentinel, int) + self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) + event.set() + p.join() + self.assertTrue(wait_for_handle(sentinel, timeout=1)) + + @classmethod + def _test_close(cls, rc=0, q=None): + if q is not None: + q.get() + sys.exit(rc) + + def test_close(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + q = self.Queue() + p = self.Process(target=self._test_close, kwargs={'q': q}) + p.daemon = True + p.start() + self.assertEqual(p.is_alive(), True) + # Child is still alive, cannot close + with self.assertRaises(ValueError): + p.close() + + q.put(None) + p.join() + self.assertEqual(p.is_alive(), False) + self.assertEqual(p.exitcode, 0) + p.close() + with self.assertRaises(ValueError): + p.is_alive() + with self.assertRaises(ValueError): + p.join() + 
with self.assertRaises(ValueError): + p.terminate() + p.close() + + wr = weakref.ref(p) + del p + gc.collect() + self.assertIs(wr(), None) + + close_queue(q) + + def test_many_processes(self): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + sm = multiprocessing.get_start_method() + N = 5 if sm == 'spawn' else 100 + + # Try to overwhelm the forkserver loop with events + procs = [self.Process(target=self._test_sleep, args=(0.01,)) + for i in range(N)] + for p in procs: + p.start() + for p in procs: + join_process(p) + for p in procs: + self.assertEqual(p.exitcode, 0) + + procs = [self.Process(target=self._sleep_some) + for i in range(N)] + for p in procs: + p.start() + time.sleep(0.001) # let the children start... + for p in procs: + p.terminate() + for p in procs: + join_process(p) + if os.name != 'nt': + exitcodes = [-signal.SIGTERM] + if sys.platform == 'darwin': + # bpo-31510: On macOS, killing a freshly started process with + # SIGTERM sometimes kills the process with SIGKILL. + exitcodes.append(-signal.SIGKILL) + for p in procs: + self.assertIn(p.exitcode, exitcodes) + + def test_lose_target_ref(self): + c = DummyCallable() + wr = weakref.ref(c) + q = self.Queue() + p = self.Process(target=c, args=(q, c)) + del c + p.start() + p.join() + self.assertIs(wr(), None) + self.assertEqual(q.get(), 5) + close_queue(q) + + @classmethod + def _test_child_fd_inflation(self, evt, q): + q.put(test.support.fd_count()) + evt.wait() + + def test_child_fd_inflation(self): + # Number of fds in child processes should not grow with the + # number of running children. + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + sm = multiprocessing.get_start_method() + if sm == 'fork': + # The fork method by design inherits all fds from the parent, + # trying to go against it is a lost battle + self.skipTest('test not appropriate for {}'.format(sm)) + + N = 5 + evt = self.Event() + q = self.Queue() + + procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) + for i in range(N)] + for p in procs: + p.start() + + try: + fd_counts = [q.get() for i in range(N)] + self.assertEqual(len(set(fd_counts)), 1, fd_counts) + + finally: + evt.set() + for p in procs: + p.join() + close_queue(q) + + @classmethod + def _test_wait_for_threads(self, evt): + def func1(): + time.sleep(0.5) + evt.set() + + def func2(): + time.sleep(20) + evt.clear() + + threading.Thread(target=func1).start() + threading.Thread(target=func2, daemon=True).start() + + def test_wait_for_threads(self): + # A child process should wait for non-daemonic threads to end + # before exiting + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + evt = self.Event() + proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) + proc.start() + proc.join() + self.assertTrue(evt.is_set()) + + @classmethod + def _test_error_on_stdio_flush(self, evt, break_std_streams={}): + for stream_name, action in break_std_streams.items(): + if action == 'close': + stream = io.StringIO() + stream.close() + else: + assert action == 'remove' + stream = None + setattr(sys, stream_name, None) + evt.set() + + def test_error_on_stdio_flush_1(self): + # Check that Process works with broken standard streams + streams = [io.StringIO(), None] + streams[0].close() + for stream_name in ('stdout', 'stderr'): + for stream in streams: + old_stream = getattr(sys, stream_name) + setattr(sys, stream_name, stream) + try: + evt = 
self.Event() + proc = self.Process(target=self._test_error_on_stdio_flush, + args=(evt,)) + proc.start() + proc.join() + self.assertTrue(evt.is_set()) + self.assertEqual(proc.exitcode, 0) + finally: + setattr(sys, stream_name, old_stream) + + def test_error_on_stdio_flush_2(self): + # Same as test_error_on_stdio_flush_1(), but standard streams are + # broken by the child process + for stream_name in ('stdout', 'stderr'): + for action in ('close', 'remove'): + old_stream = getattr(sys, stream_name) + try: + evt = self.Event() + proc = self.Process(target=self._test_error_on_stdio_flush, + args=(evt, {stream_name: action})) + proc.start() + proc.join() + self.assertTrue(evt.is_set()) + self.assertEqual(proc.exitcode, 0) + finally: + setattr(sys, stream_name, old_stream) + + @classmethod + def _sleep_and_set_event(self, evt, delay=0.0): + time.sleep(delay) + evt.set() + + def check_forkserver_death(self, signum): + # bpo-31308: if the forkserver process has died, we should still + # be able to create and run new Process instances (the forkserver + # is implicitly restarted). + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + sm = multiprocessing.get_start_method() + if sm != 'forkserver': + # The fork method by design inherits all fds from the parent, + # trying to go against it is a lost battle + self.skipTest('test not appropriate for {}'.format(sm)) + + from multiprocess.forkserver import _forkserver + _forkserver.ensure_running() + + # First process sleeps 500 ms + delay = 0.5 + + evt = self.Event() + proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) + proc.start() + + pid = _forkserver._forkserver_pid + os.kill(pid, signum) + # give time to the fork server to die and time to proc to complete + time.sleep(delay * 2.0) + + evt2 = self.Event() + proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) + proc2.start() + proc2.join() + self.assertTrue(evt2.is_set()) + self.assertEqual(proc2.exitcode, 0) + + proc.join() + self.assertTrue(evt.is_set()) + self.assertIn(proc.exitcode, (0, 255)) + + def test_forkserver_sigint(self): + # Catchable signal + self.check_forkserver_death(signal.SIGINT) + + def test_forkserver_sigkill(self): + # Uncatchable signal + if os.name != 'nt': + self.check_forkserver_death(signal.SIGKILL) + + +# +# +# + +class _UpperCaser(multiprocessing.Process): + + def __init__(self): + multiprocessing.Process.__init__(self) + self.child_conn, self.parent_conn = multiprocessing.Pipe() + + def run(self): + self.parent_conn.close() + for s in iter(self.child_conn.recv, None): + self.child_conn.send(s.upper()) + self.child_conn.close() + + def submit(self, s): + assert type(s) is str + self.parent_conn.send(s) + return self.parent_conn.recv() + + def stop(self): + self.parent_conn.send(None) + self.parent_conn.close() + self.child_conn.close() + +class _TestSubclassingProcess(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_subclassing(self): + uppercaser = _UpperCaser() + uppercaser.daemon = True + uppercaser.start() + self.assertEqual(uppercaser.submit('hello'), 'HELLO') + self.assertEqual(uppercaser.submit('world'), 'WORLD') + uppercaser.stop() + uppercaser.join() + + def test_stderr_flush(self): + # sys.stderr is flushed at process shutdown (issue #13812) + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + testfn = test.support.TESTFN + self.addCleanup(test.support.unlink, testfn) + proc = self.Process(target=self._test_stderr_flush, 
args=(testfn,)) + proc.start() + proc.join() + with open(testfn, 'r') as f: + err = f.read() + # The whole traceback was printed + self.assertIn("ZeroDivisionError", err) + self.assertIn("__init__.py", err) + self.assertIn("1/0 # MARKER", err) + + @classmethod + def _test_stderr_flush(cls, testfn): + fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) + sys.stderr = open(fd, 'w', closefd=False) + 1/0 # MARKER + + + @classmethod + def _test_sys_exit(cls, reason, testfn): + fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) + sys.stderr = open(fd, 'w', closefd=False) + sys.exit(reason) + + def test_sys_exit(self): + # See Issue 13854 + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + testfn = test.support.TESTFN + self.addCleanup(test.support.unlink, testfn) + + for reason in ( + [1, 2, 3], + 'ignore this', + ): + p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) + p.daemon = True + p.start() + join_process(p) + self.assertEqual(p.exitcode, 1) + + with open(testfn, 'r') as f: + content = f.read() + self.assertEqual(content.rstrip(), str(reason)) + + os.unlink(testfn) + + cases = [ + ((True,), 1), + ((False,), 0), + ((8,), 8), + ((None,), 0), + ((), 0), + ] + + for args, expected in cases: + with self.subTest(args=args): + p = self.Process(target=sys.exit, args=args) + p.daemon = True + p.start() + join_process(p) + self.assertEqual(p.exitcode, expected) + +# +# +# + +def queue_empty(q): + if hasattr(q, 'empty'): + return q.empty() + else: + return q.qsize() == 0 + +def queue_full(q, maxsize): + if hasattr(q, 'full'): + return q.full() + else: + return q.qsize() == maxsize + + +class _TestQueue(BaseTestCase): + + + @classmethod + def _test_put(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + for i in range(6): + queue.get() + parent_can_continue.set() + + def test_put(self): + MAXSIZE = 6 + queue = self.Queue(maxsize=MAXSIZE) + child_can_start = self.Event() + parent_can_continue = self.Event() + + proc = self.Process( + target=self._test_put, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertEqual(queue_empty(queue), True) + self.assertEqual(queue_full(queue, MAXSIZE), False) + + queue.put(1) + queue.put(2, True) + queue.put(3, True, None) + queue.put(4, False) + queue.put(5, False, None) + queue.put_nowait(6) + + # the values may be in buffer but not yet in pipe so sleep a bit + time.sleep(DELTA) + + self.assertEqual(queue_empty(queue), False) + self.assertEqual(queue_full(queue, MAXSIZE), True) + + put = TimingWrapper(queue.put) + put_nowait = TimingWrapper(queue.put_nowait) + + self.assertRaises(pyqueue.Full, put, 7, False) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, False, None) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put_nowait, 7) + self.assertTimingAlmostEqual(put_nowait.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) + self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) + + self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) + self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) + + child_can_start.set() + parent_can_continue.wait() + + self.assertEqual(queue_empty(queue), True) + self.assertEqual(queue_full(queue, MAXSIZE), False) + + proc.join() + close_queue(queue) + + @classmethod + def 
_test_get(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + #queue.put(1) + queue.put(2) + queue.put(3) + queue.put(4) + queue.put(5) + parent_can_continue.set() + + def test_get(self): + queue = self.Queue() + child_can_start = self.Event() + parent_can_continue = self.Event() + + proc = self.Process( + target=self._test_get, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertEqual(queue_empty(queue), True) + + child_can_start.set() + parent_can_continue.wait() + + time.sleep(DELTA) + self.assertEqual(queue_empty(queue), False) + + # Hangs unexpectedly, remove for now + #self.assertEqual(queue.get(), 1) + self.assertEqual(queue.get(True, None), 2) + self.assertEqual(queue.get(True), 3) + self.assertEqual(queue.get(timeout=1), 4) + self.assertEqual(queue.get_nowait(), 5) + + self.assertEqual(queue_empty(queue), True) + + get = TimingWrapper(queue.get) + get_nowait = TimingWrapper(queue.get_nowait) + + self.assertRaises(pyqueue.Empty, get, False) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, False, None) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get_nowait) + self.assertTimingAlmostEqual(get_nowait.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) + + self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) + + proc.join() + close_queue(queue) + + @classmethod + def _test_fork(cls, queue): + for i in range(10, 20): + queue.put(i) + # note that at this point the items may only be buffered, so the + # process cannot shutdown until the feeder thread has finished + # pushing items onto the pipe. + + def test_fork(self): + # Old versions of Queue would fail to create a new feeder + # thread for a forked process if the original process had its + # own feeder thread. This test checks that this no longer + # happens. 
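        # -------------------------------------------------------------------
        # Illustrative aside (not part of the test): Queue.put() only hands
        # the object to a background "feeder" thread, which pickles it and
        # writes it to the underlying pipe later.  That is also why
        # close_queue() above calls both close() and join_thread():
        #
        #   q = multiprocessing.Queue()
        #   q.put('x')        # returns at once; 'x' may still be buffered
        #   q.close()         # no more puts; feeder flushes remaining items
        #   q.join_thread()   # wait for the feeder thread to finish
        # -------------------------------------------------------------------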
+ + queue = self.Queue() + + # put items on queue so that main process starts a feeder thread + for i in range(10): + queue.put(i) + + # wait to make sure thread starts before we fork a new process + time.sleep(DELTA) + + # fork process + p = self.Process(target=self._test_fork, args=(queue,)) + p.daemon = True + p.start() + + # check that all expected items are in the queue + for i in range(20): + self.assertEqual(queue.get(), i) + self.assertRaises(pyqueue.Empty, queue.get, False) + + p.join() + close_queue(queue) + + def test_qsize(self): + q = self.Queue() + try: + self.assertEqual(q.qsize(), 0) + except NotImplementedError: + self.skipTest('qsize method not implemented') + q.put(1) + self.assertEqual(q.qsize(), 1) + q.put(5) + self.assertEqual(q.qsize(), 2) + q.get() + self.assertEqual(q.qsize(), 1) + q.get() + self.assertEqual(q.qsize(), 0) + close_queue(q) + + @classmethod + def _test_task_done(cls, q): + for obj in iter(q.get, None): + time.sleep(DELTA) + q.task_done() + + def test_task_done(self): + queue = self.JoinableQueue() + + workers = [self.Process(target=self._test_task_done, args=(queue,)) + for i in range(4)] + + for p in workers: + p.daemon = True + p.start() + + for i in range(10): + queue.put(i) + + queue.join() + + for p in workers: + queue.put(None) + + for p in workers: + p.join() + close_queue(queue) + + def test_no_import_lock_contention(self): + with test.support.temp_cwd(): + module_name = 'imported_by_an_imported_module' + with open(module_name + '.py', 'w') as f: + f.write("""if 1: + import multiprocess as multiprocessing + + q = multiprocessing.Queue() + q.put('knock knock') + q.get(timeout=3) + q.close() + del q + """) + + with test.support.DirsOnSysPath(os.getcwd()): + try: + __import__(module_name) + except pyqueue.Empty: + self.fail("Probable regression on import lock contention;" + " see Issue #22853") + + def test_timeout(self): + q = multiprocessing.Queue() + start = getattr(time,'monotonic',time.time)() + self.assertRaises(pyqueue.Empty, q.get, True, 0.200) + delta = getattr(time,'monotonic',time.time)() - start + # bpo-30317: Tolerate a delta of 100 ms because of the bad clock + # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once + # failed because the delta was only 135.8 ms. + self.assertGreaterEqual(delta, 0.100) + close_queue(q) + + def test_queue_feeder_donot_stop_onexc(self): + # bpo-30414: verify feeder handles exceptions correctly + if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + class NotSerializable(object): + def __reduce__(self): + raise AttributeError + with test.support.captured_stderr(): + q = self.Queue() + q.put(NotSerializable()) + q.put(True) + self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) + close_queue(q) + + with test.support.captured_stderr(): + # bpo-33078: verify that the queue size is correctly handled + # on errors. + q = self.Queue(maxsize=1) + q.put(NotSerializable()) + q.put(True) + try: + self.assertEqual(q.qsize(), 1) + except NotImplementedError: + # qsize is not available on all platform as it + # relies on sem_getvalue + pass + self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) + # Check that the size of the queue is correct + self.assertTrue(q.empty()) + close_queue(q) + + def test_queue_feeder_on_queue_feeder_error(self): + # bpo-30006: verify feeder handles exceptions using the + # _on_queue_feeder_error hook. 
+ if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + class NotSerializable(object): + """Mock unserializable object""" + def __init__(self): + self.reduce_was_called = False + self.on_queue_feeder_error_was_called = False + + def __reduce__(self): + self.reduce_was_called = True + raise AttributeError + + class SafeQueue(multiprocessing.queues.Queue): + """Queue with overloaded _on_queue_feeder_error hook""" + @staticmethod + def _on_queue_feeder_error(e, obj): + if (isinstance(e, AttributeError) and + isinstance(obj, NotSerializable)): + obj.on_queue_feeder_error_was_called = True + + not_serializable_obj = NotSerializable() + # The captured_stderr reduces the noise in the test report + with test.support.captured_stderr(): + q = SafeQueue(ctx=multiprocessing.get_context()) + q.put(not_serializable_obj) + + # Verify that q is still functioning correctly + q.put(True) + self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) + + # Assert that the serialization and the hook have been called correctly + self.assertTrue(not_serializable_obj.reduce_was_called) + self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) + + def test_closed_queue_put_get_exceptions(self): + for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): + q.close() + with self.assertRaisesRegex(ValueError, 'is closed'): + q.put('foo') + with self.assertRaisesRegex(ValueError, 'is closed'): + q.get() +# +# +# + +class _TestLock(BaseTestCase): + + def test_lock(self): + lock = self.Lock() + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(False), False) + self.assertEqual(lock.release(), None) + self.assertRaises((ValueError, threading.ThreadError), lock.release) + + def test_rlock(self): + lock = self.RLock() + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.release(), None) + self.assertEqual(lock.release(), None) + self.assertEqual(lock.release(), None) + self.assertRaises((AssertionError, RuntimeError), lock.release) + + def test_lock_context(self): + with self.Lock(): + pass + + +class _TestSemaphore(BaseTestCase): + + def _test_semaphore(self, sem): + self.assertReturnsIfImplemented(2, get_value, sem) + self.assertEqual(sem.acquire(), True) + self.assertReturnsIfImplemented(1, get_value, sem) + self.assertEqual(sem.acquire(), True) + self.assertReturnsIfImplemented(0, get_value, sem) + self.assertEqual(sem.acquire(False), False) + self.assertReturnsIfImplemented(0, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(1, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(2, get_value, sem) + + def test_semaphore(self): + sem = self.Semaphore(2) + self._test_semaphore(sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(3, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(4, get_value, sem) + + def test_bounded_semaphore(self): + sem = self.BoundedSemaphore(2) + self._test_semaphore(sem) + # Currently fails on OS/X + #if HAVE_GETVALUE: + # self.assertRaises(ValueError, sem.release) + # self.assertReturnsIfImplemented(2, get_value, sem) + + def test_timeout(self): + if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + sem = self.Semaphore(0) + acquire = TimingWrapper(sem.acquire) + + self.assertEqual(acquire(False), False) + 
self.assertTimingAlmostEqual(acquire.elapsed, 0.0) + + self.assertEqual(acquire(False, None), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0.0) + + self.assertEqual(acquire(False, TIMEOUT1), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0) + + self.assertEqual(acquire(True, TIMEOUT2), False) + self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) + + self.assertEqual(acquire(timeout=TIMEOUT3), False) + self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) + + +class _TestCondition(BaseTestCase): + + @classmethod + def f(cls, cond, sleeping, woken, timeout=None): + cond.acquire() + sleeping.release() + cond.wait(timeout) + woken.release() + cond.release() + + def assertReachesEventually(self, func, value): + for i in range(10): + try: + if func() == value: + break + except NotImplementedError: + break + time.sleep(DELTA) + time.sleep(DELTA) + self.assertReturnsIfImplemented(value, func) + + def check_invariant(self, cond): + # this is only supposed to succeed when there are no sleepers + if self.TYPE == 'processes': + try: + sleepers = (cond._sleeping_count.get_value() - + cond._woken_count.get_value()) + self.assertEqual(sleepers, 0) + self.assertEqual(cond._wait_semaphore.get_value(), 0) + except NotImplementedError: + pass + + def test_notify(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + # wait for both children to start sleeping + sleeping.acquire() + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake up one process/thread + cond.acquire() + cond.notify() + cond.release() + + # check one process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(1, get_value, woken) + + # wake up another + cond.acquire() + cond.notify() + cond.release() + + # check other has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(2, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + p.join() + + def test_notify_all(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + # start some threads/processes which will timeout + for i in range(3): + p = self.Process(target=self.f, + args=(cond, sleeping, woken, TIMEOUT1)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + t = threading.Thread(target=self.f, + args=(cond, sleeping, woken, TIMEOUT1)) + t.daemon = True + t.start() + self.addCleanup(t.join) + + # wait for them all to sleep + for i in range(6): + sleeping.acquire() + + # check they have all timed out + for i in range(6): + woken.acquire() + self.assertReturnsIfImplemented(0, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + + # start some more threads/processes + for i in range(3): + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + t.daemon = True + t.start() + self.addCleanup(t.join) + + # wait for them to all sleep + for i in range(6): + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake them all up + 
cond.acquire() + cond.notify_all() + cond.release() + + # check they have all woken + self.assertReachesEventually(lambda: get_value(woken), 6) + + # check state is not mucked up + self.check_invariant(cond) + + def test_notify_n(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + # start some threads/processes + for i in range(3): + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + t.daemon = True + t.start() + self.addCleanup(t.join) + + # wait for them to all sleep + for i in range(6): + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake some of them up + cond.acquire() + cond.notify(n=2) + cond.release() + + # check 2 have woken + self.assertReachesEventually(lambda: get_value(woken), 2) + + # wake the rest of them + cond.acquire() + cond.notify(n=4) + cond.release() + + self.assertReachesEventually(lambda: get_value(woken), 6) + + # doesn't do anything more + cond.acquire() + cond.notify(n=3) + cond.release() + + self.assertReturnsIfImplemented(6, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + + def test_timeout(self): + cond = self.Condition() + wait = TimingWrapper(cond.wait) + cond.acquire() + res = wait(TIMEOUT1) + cond.release() + self.assertEqual(res, False) + self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) + + @classmethod + def _test_waitfor_f(cls, cond, state): + with cond: + state.value = 0 + cond.notify() + result = cond.wait_for(lambda : state.value==4) + if not result or state.value != 4: + sys.exit(1) + + @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') + def test_waitfor(self): + # based on test in test/lock_tests.py + cond = self.Condition() + state = self.Value('i', -1) + + p = self.Process(target=self._test_waitfor_f, args=(cond, state)) + p.daemon = True + p.start() + + with cond: + result = cond.wait_for(lambda : state.value==0) + self.assertTrue(result) + self.assertEqual(state.value, 0) + + for i in range(4): + time.sleep(0.01) + with cond: + state.value += 1 + cond.notify() + + join_process(p) + self.assertEqual(p.exitcode, 0) + + @classmethod + def _test_waitfor_timeout_f(cls, cond, state, success, sem): + sem.release() + with cond: + expected = 0.1 + dt = getattr(time,'monotonic',time.time)() + result = cond.wait_for(lambda : state.value==4, timeout=expected) + dt = getattr(time,'monotonic',time.time)() - dt + # borrow logic in assertTimeout() from test/lock_tests.py + if not result and expected * 0.6 < dt < expected * 10.0: + success.value = True + + @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') + def test_waitfor_timeout(self): + # based on test in test/lock_tests.py + cond = self.Condition() + state = self.Value('i', 0) + success = self.Value('i', False) + sem = self.Semaphore(0) + + p = self.Process(target=self._test_waitfor_timeout_f, + args=(cond, state, success, sem)) + p.daemon = True + p.start() + self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) + + # Only increment 3 times, so state == 4 is never reached. 
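+        # The child's wait_for(..., timeout=0.1) should therefore time out,
+        # which _test_waitfor_timeout_f records via the success flag.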
+ for i in range(3): + time.sleep(0.01) + with cond: + state.value += 1 + cond.notify() + + join_process(p) + self.assertTrue(success.value) + + @classmethod + def _test_wait_result(cls, c, pid): + with c: + c.notify() + time.sleep(1) + if pid is not None: + os.kill(pid, signal.SIGINT) + + def test_wait_result(self): + if isinstance(self, ProcessesMixin) and sys.platform != 'win32': + pid = os.getpid() + else: + pid = None + + c = self.Condition() + with c: + self.assertFalse(c.wait(0)) + self.assertFalse(c.wait(0.1)) + + p = self.Process(target=self._test_wait_result, args=(c, pid)) + p.start() + + self.assertTrue(c.wait(60)) + if pid is not None: + self.assertRaises(KeyboardInterrupt, c.wait, 60) + + p.join() + + +class _TestEvent(BaseTestCase): + + @classmethod + def _test_event(cls, event): + time.sleep(TIMEOUT2) + event.set() + + def test_event(self): + event = self.Event() + wait = TimingWrapper(event.wait) + + # Removed temporarily, due to API shear, this does not + # work with threading._Event objects. is_set == isSet + self.assertEqual(event.is_set(), False) + + # Removed, threading.Event.wait() will return the value of the __flag + # instead of None. API Shear with the semaphore backed mp.Event + self.assertEqual(wait(0.0), False) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + self.assertEqual(wait(TIMEOUT1), False) + self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) + + event.set() + + # See note above on the API differences + self.assertEqual(event.is_set(), True) + self.assertEqual(wait(), True) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + self.assertEqual(wait(TIMEOUT1), True) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + # self.assertEqual(event.is_set(), True) + + event.clear() + + #self.assertEqual(event.is_set(), False) + + p = self.Process(target=self._test_event, args=(event,)) + p.daemon = True + p.start() + self.assertEqual(wait(), True) + p.join() + +# +# Tests for Barrier - adapted from tests in test/lock_tests.py +# + +# Many of the tests for threading.Barrier use a list as an atomic +# counter: a value is appended to increment the counter, and the +# length of the list gives the value. We use the class DummyList +# for the same purpose. + +class _DummyList(object): + + def __init__(self): + wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) + lock = multiprocessing.Lock() + self.__setstate__((wrapper, lock)) + self._lengthbuf[0] = 0 + + def __setstate__(self, state): + (self._wrapper, self._lock) = state + self._lengthbuf = self._wrapper.create_memoryview().cast('i') + + def __getstate__(self): + return (self._wrapper, self._lock) + + def append(self, _): + with self._lock: + self._lengthbuf[0] += 1 + + def __len__(self): + with self._lock: + return self._lengthbuf[0] + +def _wait(): + # A crude wait/yield function not relying on synchronization primitives. + time.sleep(0.01) + + +class Bunch(object): + """ + A bunch of threads. + """ + def __init__(self, namespace, f, args, n, wait_before_exit=False): + """ + Construct a bunch of `n` threads running the same function `f`. + If `wait_before_exit` is True, the threads won't terminate until + do_finish() is called. 
+ """ + self.f = f + self.args = args + self.n = n + self.started = namespace.DummyList() + self.finished = namespace.DummyList() + self._can_exit = namespace.Event() + if not wait_before_exit: + self._can_exit.set() + + threads = [] + for i in range(n): + p = namespace.Process(target=self.task) + p.daemon = True + p.start() + threads.append(p) + + def finalize(threads): + for p in threads: + p.join() + + self._finalizer = weakref.finalize(self, finalize, threads) + + def task(self): + pid = os.getpid() + self.started.append(pid) + try: + self.f(*self.args) + finally: + self.finished.append(pid) + self._can_exit.wait(30) + assert self._can_exit.is_set() + + def wait_for_started(self): + while len(self.started) < self.n: + _wait() + + def wait_for_finished(self): + while len(self.finished) < self.n: + _wait() + + def do_finish(self): + self._can_exit.set() + + def close(self): + self._finalizer() + + +class AppendTrue(object): + def __init__(self, obj): + self.obj = obj + def __call__(self): + self.obj.append(True) + + +class _TestBarrier(BaseTestCase): + """ + Tests for Barrier objects. + """ + N = 5 + defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout + + def setUp(self): + self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) + + def tearDown(self): + self.barrier.abort() + self.barrier = None + + def DummyList(self): + if self.TYPE == 'threads': + return [] + elif self.TYPE == 'manager': + return self.manager.list() + else: + return _DummyList() + + def run_threads(self, f, args): + b = Bunch(self, f, args, self.N-1) + try: + f(*args) + b.wait_for_finished() + finally: + b.close() + + @classmethod + def multipass(cls, barrier, results, n): + m = barrier.parties + assert m == cls.N + for i in range(n): + results[0].append(True) + assert len(results[1]) == i * m + barrier.wait() + results[1].append(True) + assert len(results[0]) == (i + 1) * m + barrier.wait() + try: + assert barrier.n_waiting == 0 + except NotImplementedError: + pass + assert not barrier.broken + + def test_barrier(self, passes=1): + """ + Test that a barrier is passed in lockstep + """ + results = [self.DummyList(), self.DummyList()] + self.run_threads(self.multipass, (self.barrier, results, passes)) + + def test_barrier_10(self): + """ + Test that a barrier works for 10 consecutive runs + """ + return self.test_barrier(10) + + @classmethod + def _test_wait_return_f(cls, barrier, queue): + res = barrier.wait() + queue.put(res) + + def test_wait_return(self): + """ + test the return value from barrier.wait + """ + queue = self.Queue() + self.run_threads(self._test_wait_return_f, (self.barrier, queue)) + results = [queue.get() for i in range(self.N)] + self.assertEqual(results.count(0), 1) + close_queue(queue) + + @classmethod + def _test_action_f(cls, barrier, results): + barrier.wait() + if len(results) != 1: + raise RuntimeError + + def test_action(self): + """ + Test the 'action' callback + """ + results = self.DummyList() + barrier = self.Barrier(self.N, action=AppendTrue(results)) + self.run_threads(self._test_action_f, (barrier, results)) + self.assertEqual(len(results), 1) + + @classmethod + def _test_abort_f(cls, barrier, results1, results2): + try: + i = barrier.wait() + if i == cls.N//2: + raise RuntimeError + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + except RuntimeError: + barrier.abort() + + def test_abort(self): + """ + Test that an abort will put the barrier in a broken state + """ + results1 = self.DummyList() + 
results2 = self.DummyList() + self.run_threads(self._test_abort_f, + (self.barrier, results1, results2)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertTrue(self.barrier.broken) + + @classmethod + def _test_reset_f(cls, barrier, results1, results2, results3): + i = barrier.wait() + if i == cls.N//2: + # Wait until the other threads are all in the barrier. + while barrier.n_waiting < cls.N-1: + time.sleep(0.001) + barrier.reset() + else: + try: + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + # Now, pass the barrier again + barrier.wait() + results3.append(True) + + def test_reset(self): + """ + Test that a 'reset' on a barrier frees the waiting threads + """ + results1 = self.DummyList() + results2 = self.DummyList() + results3 = self.DummyList() + self.run_threads(self._test_reset_f, + (self.barrier, results1, results2, results3)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertEqual(len(results3), self.N) + + @classmethod + def _test_abort_and_reset_f(cls, barrier, barrier2, + results1, results2, results3): + try: + i = barrier.wait() + if i == cls.N//2: + raise RuntimeError + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + except RuntimeError: + barrier.abort() + # Synchronize and reset the barrier. Must synchronize first so + # that everyone has left it when we reset, and after so that no + # one enters it before the reset. + if barrier2.wait() == cls.N//2: + barrier.reset() + barrier2.wait() + barrier.wait() + results3.append(True) + + def test_abort_and_reset(self): + """ + Test that a barrier can be reset after being broken. + """ + results1 = self.DummyList() + results2 = self.DummyList() + results3 = self.DummyList() + barrier2 = self.Barrier(self.N) + + self.run_threads(self._test_abort_and_reset_f, + (self.barrier, barrier2, results1, results2, results3)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertEqual(len(results3), self.N) + + @classmethod + def _test_timeout_f(cls, barrier, results): + i = barrier.wait() + if i == cls.N//2: + # One thread is late! 
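+            # Sleep past the 0.5 second wait() timeout used below so the
+            # barrier breaks and every party sees BrokenBarrierError.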
+ time.sleep(1.0) + try: + barrier.wait(0.5) + except threading.BrokenBarrierError: + results.append(True) + + def test_timeout(self): + """ + Test wait(timeout) + """ + results = self.DummyList() + self.run_threads(self._test_timeout_f, (self.barrier, results)) + self.assertEqual(len(results), self.barrier.parties) + + @classmethod + def _test_default_timeout_f(cls, barrier, results): + i = barrier.wait(cls.defaultTimeout) + if i == cls.N//2: + # One thread is later than the default timeout + time.sleep(1.0) + try: + barrier.wait() + except threading.BrokenBarrierError: + results.append(True) + + def test_default_timeout(self): + """ + Test the barrier's default timeout + """ + barrier = self.Barrier(self.N, timeout=0.5) + results = self.DummyList() + self.run_threads(self._test_default_timeout_f, (barrier, results)) + self.assertEqual(len(results), barrier.parties) + + def test_single_thread(self): + b = self.Barrier(1) + b.wait() + b.wait() + + @classmethod + def _test_thousand_f(cls, barrier, passes, conn, lock): + for i in range(passes): + barrier.wait() + with lock: + conn.send(i) + + def test_thousand(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + passes = 1000 + lock = self.Lock() + conn, child_conn = self.Pipe(False) + for j in range(self.N): + p = self.Process(target=self._test_thousand_f, + args=(self.barrier, passes, child_conn, lock)) + p.start() + self.addCleanup(p.join) + + for i in range(passes): + for j in range(self.N): + self.assertEqual(conn.recv(), i) + +# +# +# + +class _TestValue(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + codes_values = [ + ('i', 4343, 24234), + ('d', 3.625, -4.25), + ('h', -232, 234), + ('q', 2 ** 33, 2 ** 34), + ('c', latin('x'), latin('y')) + ] + + def setUp(self): + if not HAS_SHAREDCTYPES: + self.skipTest("requires multiprocess.sharedctypes") + + @classmethod + def _test(cls, values): + for sv, cv in zip(values, cls.codes_values): + sv.value = cv[2] + + + def test_value(self, raw=False): + if raw: + values = [self.RawValue(code, value) + for code, value, _ in self.codes_values] + else: + values = [self.Value(code, value) + for code, value, _ in self.codes_values] + + for sv, cv in zip(values, self.codes_values): + self.assertEqual(sv.value, cv[1]) + + proc = self.Process(target=self._test, args=(values,)) + proc.daemon = True + proc.start() + proc.join() + + for sv, cv in zip(values, self.codes_values): + self.assertEqual(sv.value, cv[2]) + + def test_rawvalue(self): + self.test_value(raw=True) + + def test_getobj_getlock(self): + val1 = self.Value('i', 5) + lock1 = val1.get_lock() + obj1 = val1.get_obj() + + val2 = self.Value('i', 5, lock=None) + lock2 = val2.get_lock() + obj2 = val2.get_obj() + + lock = self.Lock() + val3 = self.Value('i', 5, lock=lock) + lock3 = val3.get_lock() + obj3 = val3.get_obj() + self.assertEqual(lock, lock3) + + arr4 = self.Value('i', 5, lock=False) + self.assertFalse(hasattr(arr4, 'get_lock')) + self.assertFalse(hasattr(arr4, 'get_obj')) + + self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') + + arr5 = self.RawValue('i', 5) + self.assertFalse(hasattr(arr5, 'get_lock')) + self.assertFalse(hasattr(arr5, 'get_obj')) + + +class _TestArray(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def f(cls, seq): + for i in range(1, len(seq)): + seq[i] += seq[i-1] + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_array(self, raw=False): + seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] + if raw: + arr 
= self.RawArray('i', seq) + else: + arr = self.Array('i', seq) + + self.assertEqual(len(arr), len(seq)) + self.assertEqual(arr[3], seq[3]) + self.assertEqual(list(arr[2:7]), list(seq[2:7])) + + arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) + + self.assertEqual(list(arr[:]), seq) + + self.f(seq) + + p = self.Process(target=self.f, args=(arr,)) + p.daemon = True + p.start() + p.join() + + self.assertEqual(list(arr[:]), seq) + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_array_from_size(self): + size = 10 + # Test for zeroing (see issue #11675). + # The repetition below strengthens the test by increasing the chances + # of previously allocated non-zero memory being used for the new array + # on the 2nd and 3rd loops. + for _ in range(3): + arr = self.Array('i', size) + self.assertEqual(len(arr), size) + self.assertEqual(list(arr), [0] * size) + arr[:] = range(10) + self.assertEqual(list(arr), list(range(10))) + del arr + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_rawarray(self): + self.test_array(raw=True) + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_getobj_getlock_obj(self): + arr1 = self.Array('i', list(range(10))) + lock1 = arr1.get_lock() + obj1 = arr1.get_obj() + + arr2 = self.Array('i', list(range(10)), lock=None) + lock2 = arr2.get_lock() + obj2 = arr2.get_obj() + + lock = self.Lock() + arr3 = self.Array('i', list(range(10)), lock=lock) + lock3 = arr3.get_lock() + obj3 = arr3.get_obj() + self.assertEqual(lock, lock3) + + arr4 = self.Array('i', range(10), lock=False) + self.assertFalse(hasattr(arr4, 'get_lock')) + self.assertFalse(hasattr(arr4, 'get_obj')) + self.assertRaises(AttributeError, + self.Array, 'i', range(10), lock='notalock') + + arr5 = self.RawArray('i', range(10)) + self.assertFalse(hasattr(arr5, 'get_lock')) + self.assertFalse(hasattr(arr5, 'get_obj')) + +# +# +# + +class _TestContainers(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + + def test_list(self): + a = self.list(list(range(10))) + self.assertEqual(a[:], list(range(10))) + + b = self.list() + self.assertEqual(b[:], []) + + b.extend(list(range(5))) + self.assertEqual(b[:], list(range(5))) + + self.assertEqual(b[2], 2) + self.assertEqual(b[2:10], [2,3,4]) + + b *= 2 + self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) + + self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) + + self.assertEqual(a[:], list(range(10))) + + d = [a, b] + e = self.list(d) + self.assertEqual( + [element[:] for element in e], + [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] + ) + + f = self.list([a]) + a.append('hello') + self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']) + + def test_list_iter(self): + a = self.list(list(range(10))) + it = iter(a) + self.assertEqual(list(it), list(range(10))) + self.assertEqual(list(it), []) # exhausted + # list modified during iteration + it = iter(a) + a[0] = 100 + self.assertEqual(next(it), 100) + + def test_list_proxy_in_list(self): + a = self.list([self.list(range(3)) for _i in range(3)]) + self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3) + + a[0][-1] = 55 + self.assertEqual(a[0][:], [0, 1, 55]) + for i in range(1, 3): + self.assertEqual(a[i][:], [0, 1, 2]) + + self.assertEqual(a[1].pop(), 2) + self.assertEqual(len(a[1]), 2) + for i in range(0, 3, 2): + self.assertEqual(len(a[i]), 3) + + del a + + b = self.list() + b.append(b) + del b + + def test_dict(self): + d = self.dict() + indices = list(range(65, 70)) + for i in indices: + d[i] = chr(i) + 
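+        # copy() should return a plain dict snapshot of the proxied dict.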
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) + self.assertEqual(sorted(d.keys()), indices) + self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) + self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) + + def test_dict_iter(self): + d = self.dict() + indices = list(range(65, 70)) + for i in indices: + d[i] = chr(i) + it = iter(d) + self.assertEqual(list(it), indices) + self.assertEqual(list(it), []) # exhausted + # dictionary changed size during iteration + it = iter(d) + d.clear() + self.assertRaises(RuntimeError, next, it) + + def test_dict_proxy_nested(self): + pets = self.dict(ferrets=2, hamsters=4) + supplies = self.dict(water=10, feed=3) + d = self.dict(pets=pets, supplies=supplies) + + self.assertEqual(supplies['water'], 10) + self.assertEqual(d['supplies']['water'], 10) + + d['supplies']['blankets'] = 5 + self.assertEqual(supplies['blankets'], 5) + self.assertEqual(d['supplies']['blankets'], 5) + + d['supplies']['water'] = 7 + self.assertEqual(supplies['water'], 7) + self.assertEqual(d['supplies']['water'], 7) + + del pets + del supplies + self.assertEqual(d['pets']['ferrets'], 2) + d['supplies']['blankets'] = 11 + self.assertEqual(d['supplies']['blankets'], 11) + + pets = d['pets'] + supplies = d['supplies'] + supplies['water'] = 7 + self.assertEqual(supplies['water'], 7) + self.assertEqual(d['supplies']['water'], 7) + + d.clear() + self.assertEqual(len(d), 0) + self.assertEqual(supplies['water'], 7) + self.assertEqual(pets['hamsters'], 4) + + l = self.list([pets, supplies]) + l[0]['marmots'] = 1 + self.assertEqual(pets['marmots'], 1) + self.assertEqual(l[0]['marmots'], 1) + + del pets + del supplies + self.assertEqual(l[0]['marmots'], 1) + + outer = self.list([[88, 99], l]) + self.assertIsInstance(outer[0], list) # Not a ListProxy + self.assertEqual(outer[-1][-1]['feed'], 3) + + def test_namespace(self): + n = self.Namespace() + n.name = 'Bob' + n.job = 'Builder' + n._hidden = 'hidden' + self.assertEqual((n.name, n.job), ('Bob', 'Builder')) + del n.job + self.assertEqual(str(n), "Namespace(name='Bob')") + self.assertTrue(hasattr(n, 'name')) + self.assertTrue(not hasattr(n, 'job')) + +# +# +# + +def sqr(x, wait=0.0): + time.sleep(wait) + return x*x + +def mul(x, y): + return x*y + +def raise_large_valuerror(wait): + time.sleep(wait) + raise ValueError("x" * 1024**2) + +def identity(x): + return x + +class CountedObject(object): + n_instances = 0 + + def __new__(cls): + cls.n_instances += 1 + return object.__new__(cls) + + def __del__(self): + type(self).n_instances -= 1 + +class SayWhenError(ValueError): pass + +def exception_throwing_generator(total, when): + if when == -1: + raise SayWhenError("Somebody said when") + for i in range(total): + if i == when: + raise SayWhenError("Somebody said when") + yield i + + +class _TestPool(BaseTestCase): + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.pool = cls.Pool(4) + + @classmethod + def tearDownClass(cls): + cls.pool.terminate() + cls.pool.join() + cls.pool = None + super().tearDownClass() + + def test_apply(self): + papply = self.pool.apply + self.assertEqual(papply(sqr, (5,)), sqr(5)) + self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) + + def test_map(self): + pmap = self.pool.map + self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) + self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), + list(map(sqr, list(range(100))))) + + def test_starmap(self): + psmap = self.pool.starmap + tuples = list(zip(range(10), range(9,-1, -1))) + 
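+        # Pool.starmap should match itertools.starmap on the argument tuples.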
self.assertEqual(psmap(mul, tuples), + list(itertools.starmap(mul, tuples))) + tuples = list(zip(range(100), range(99,-1, -1))) + self.assertEqual(psmap(mul, tuples, chunksize=20), + list(itertools.starmap(mul, tuples))) + + def test_starmap_async(self): + tuples = list(zip(range(100), range(99,-1, -1))) + self.assertEqual(self.pool.starmap_async(mul, tuples).get(), + list(itertools.starmap(mul, tuples))) + + def test_map_async(self): + self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), + list(map(sqr, list(range(10))))) + + def test_map_async_callbacks(self): + call_args = self.manager.list() if self.TYPE == 'manager' else [] + self.pool.map_async(int, ['1'], + callback=call_args.append, + error_callback=call_args.append).wait() + self.assertEqual(1, len(call_args)) + self.assertEqual([1], call_args[0]) + self.pool.map_async(int, ['a'], + callback=call_args.append, + error_callback=call_args.append).wait() + self.assertEqual(2, len(call_args)) + self.assertIsInstance(call_args[1], ValueError) + + def test_map_unplicklable(self): + # Issue #19425 -- failure to pickle should not cause a hang + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + class A(object): + def __reduce__(self): + raise RuntimeError('cannot pickle') + with self.assertRaises(RuntimeError): + self.pool.map(sqr, [A()]*10) + + def test_map_chunksize(self): + try: + self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) + except multiprocessing.TimeoutError: + self.fail("pool.map_async with chunksize stalled on null list") + + def test_map_handle_iterable_exception(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # SayWhenError seen at the very first of the iterable + with self.assertRaises(SayWhenError): + self.pool.map(sqr, exception_throwing_generator(1, -1), 1) + # again, make sure it's reentrant + with self.assertRaises(SayWhenError): + self.pool.map(sqr, exception_throwing_generator(1, -1), 1) + + with self.assertRaises(SayWhenError): + self.pool.map(sqr, exception_throwing_generator(10, 3), 1) + + class SpecialIterable: + def __iter__(self): + return self + def __next__(self): + raise SayWhenError + def __len__(self): + return 1 + with self.assertRaises(SayWhenError): + self.pool.map(sqr, SpecialIterable(), 1) + with self.assertRaises(SayWhenError): + self.pool.map(sqr, SpecialIterable(), 1) + + def test_async(self): + res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) + get = TimingWrapper(res.get) + self.assertEqual(get(), 49) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) + + def test_async_timeout(self): + res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) + get = TimingWrapper(res.get) + self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) + + def test_imap(self): + it = self.pool.imap(sqr, list(range(10))) + self.assertEqual(list(it), list(map(sqr, list(range(10))))) + + it = self.pool.imap(sqr, list(range(10))) + for i in range(10): + self.assertEqual(next(it), i*i) + self.assertRaises(StopIteration, it.__next__) + + it = self.pool.imap(sqr, list(range(1000)), chunksize=100) + for i in range(1000): + self.assertEqual(next(it), i*i) + self.assertRaises(StopIteration, it.__next__) + + def test_imap_handle_iterable_exception(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # SayWhenError seen at the very first of the iterable + it = self.pool.imap(sqr, 
exception_throwing_generator(1, -1), 1) + self.assertRaises(SayWhenError, it.__next__) + # again, make sure it's reentrant + it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) + self.assertRaises(SayWhenError, it.__next__) + + it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1) + for i in range(3): + self.assertEqual(next(it), i*i) + self.assertRaises(SayWhenError, it.__next__) + + # SayWhenError seen at start of problematic chunk's results + it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2) + for i in range(6): + self.assertEqual(next(it), i*i) + self.assertRaises(SayWhenError, it.__next__) + it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4) + for i in range(4): + self.assertEqual(next(it), i*i) + self.assertRaises(SayWhenError, it.__next__) + + def test_imap_unordered(self): + it = self.pool.imap_unordered(sqr, list(range(10))) + self.assertEqual(sorted(it), list(map(sqr, list(range(10))))) + + it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100) + self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) + + def test_imap_unordered_handle_iterable_exception(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # SayWhenError seen at the very first of the iterable + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(1, -1), + 1) + self.assertRaises(SayWhenError, it.__next__) + # again, make sure it's reentrant + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(1, -1), + 1) + self.assertRaises(SayWhenError, it.__next__) + + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(10, 3), + 1) + expected_values = list(map(sqr, list(range(10)))) + with self.assertRaises(SayWhenError): + # imap_unordered makes it difficult to anticipate the SayWhenError + for i in range(10): + value = next(it) + self.assertIn(value, expected_values) + expected_values.remove(value) + + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(20, 7), + 2) + expected_values = list(map(sqr, list(range(20)))) + with self.assertRaises(SayWhenError): + for i in range(20): + value = next(it) + self.assertIn(value, expected_values) + expected_values.remove(value) + + def test_make_pool(self): + expected_error = (RemoteError if self.TYPE == 'manager' + else ValueError) + + self.assertRaises(expected_error, self.Pool, -1) + self.assertRaises(expected_error, self.Pool, 0) + + if self.TYPE != 'manager': + p = self.Pool(3) + try: + self.assertEqual(3, len(p._pool)) + finally: + p.close() + p.join() + + def test_terminate(self): + result = self.pool.map_async( + time.sleep, [0.1 for i in range(10000)], chunksize=1 + ) + self.pool.terminate() + join = TimingWrapper(self.pool.join) + join() + # Sanity check the pool didn't wait for all tasks to finish + self.assertLess(join.elapsed, 2.0) + + def test_empty_iterable(self): + # See Issue 12157 + p = self.Pool(1) + + self.assertEqual(p.map(sqr, []), []) + self.assertEqual(list(p.imap(sqr, [])), []) + self.assertEqual(list(p.imap_unordered(sqr, [])), []) + self.assertEqual(p.map_async(sqr, []).get(), []) + + p.close() + p.join() + + def test_context(self): + if self.TYPE == 'processes': + L = list(range(10)) + expected = [sqr(i) for i in L] + with self.Pool(2) as p: + r = p.map_async(sqr, L) + self.assertEqual(r.get(), expected) + p.join() + self.assertRaises(ValueError, p.map_async, sqr, L) + + @classmethod + def _test_traceback(cls): + raise RuntimeError(123) # some comment + + @unittest.skipIf(True, 
"fails with is_dill(obj, child=True)") + def test_traceback(self): + # We want ensure that the traceback from the child process is + # contained in the traceback raised in the main process. + if self.TYPE == 'processes': + with self.Pool(1) as p: + try: + p.apply(self._test_traceback) + except Exception as e: + exc = e + else: + self.fail('expected RuntimeError') + p.join() + self.assertIs(type(exc), RuntimeError) + self.assertEqual(exc.args, (123,)) + cause = exc.__cause__ + self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) + self.assertIn('raise RuntimeError(123) # some comment', cause.tb) + + with test.support.captured_stderr() as f1: + try: + raise exc + except RuntimeError: + sys.excepthook(*sys.exc_info()) + self.assertIn('raise RuntimeError(123) # some comment', + f1.getvalue()) + # _helper_reraises_exception should not make the error + # a remote exception + with self.Pool(1) as p: + try: + p.map(sqr, exception_throwing_generator(1, -1), 1) + except Exception as e: + exc = e + else: + self.fail('expected SayWhenError') + self.assertIs(type(exc), SayWhenError) + self.assertIs(exc.__cause__, None) + p.join() + + @classmethod + def _test_wrapped_exception(cls): + raise RuntimeError('foo') + + @unittest.skipIf(True, "fails with is_dill(obj, child=True)") + def test_wrapped_exception(self): + # Issue #20980: Should not wrap exception when using thread pool + with self.Pool(1) as p: + with self.assertRaises(RuntimeError): + p.apply(self._test_wrapped_exception) + p.join() + + def test_map_no_failfast(self): + # Issue #23992: the fail-fast behaviour when an exception is raised + # during map() would make Pool.join() deadlock, because a worker + # process would fill the result queue (after the result handler thread + # terminated, hence not draining it anymore). + + t_start = getattr(time,'monotonic',time.time)() + + with self.assertRaises(ValueError): + with self.Pool(2) as p: + try: + p.map(raise_large_valuerror, [0, 1]) + finally: + time.sleep(0.5) + p.close() + p.join() + + # check that we indeed waited for all jobs + self.assertGreater(getattr(time,'monotonic',time.time)() - t_start, 0.9) + + def test_release_task_refs(self): + # Issue #29861: task arguments and results should not be kept + # alive after we are done with them. + objs = [CountedObject() for i in range(10)] + refs = [weakref.ref(o) for o in objs] + self.pool.map(identity, objs) + + del objs + time.sleep(DELTA) # let threaded cleanup code run + self.assertEqual(set(wr() for wr in refs), {None}) + # With a process pool, copies of the objects are returned, check + # they were released too. 
+ self.assertEqual(CountedObject.n_instances, 0) + + def test_enter(self): + if self.TYPE == 'manager': + self.skipTest("test not applicable to manager") + + pool = self.Pool(1) + with pool: + pass + # call pool.terminate() + # pool is no longer running + + with self.assertRaises(ValueError): + # bpo-35477: pool.__enter__() fails if the pool is not running + with pool: + pass + pool.join() + + def test_resource_warning(self): + if self.TYPE == 'manager': + self.skipTest("test not applicable to manager") + + pool = self.Pool(1) + pool.terminate() + pool.join() + + # force state to RUN to emit ResourceWarning in __del__() + pool._state = multiprocessing.pool.RUN + + with support.check_warnings(('unclosed running multiprocessing pool', + ResourceWarning)): + pool = None + support.gc_collect() + +def raising(): + raise KeyError("key") + +def unpickleable_result(): + return lambda: 42 + +class _TestPoolWorkerErrors(BaseTestCase): + ALLOWED_TYPES = ('processes', ) + + def test_async_error_callback(self): + p = multiprocessing.Pool(2) + + scratchpad = [None] + def errback(exc): + scratchpad[0] = exc + + res = p.apply_async(raising, error_callback=errback) + self.assertRaises(KeyError, res.get) + self.assertTrue(scratchpad[0]) + self.assertIsInstance(scratchpad[0], KeyError) + + p.close() + p.join() + + def _test_unpickleable_result(self): + from multiprocess.pool import MaybeEncodingError + p = multiprocessing.Pool(2) + + # Make sure we don't lose pool processes because of encoding errors. + for iteration in range(20): + + scratchpad = [None] + def errback(exc): + scratchpad[0] = exc + + res = p.apply_async(unpickleable_result, error_callback=errback) + self.assertRaises(MaybeEncodingError, res.get) + wrapped = scratchpad[0] + self.assertTrue(wrapped) + self.assertIsInstance(scratchpad[0], MaybeEncodingError) + self.assertIsNotNone(wrapped.exc) + self.assertIsNotNone(wrapped.value) + + p.close() + p.join() + +class _TestPoolWorkerLifetime(BaseTestCase): + ALLOWED_TYPES = ('processes', ) + + def test_pool_worker_lifetime(self): + p = multiprocessing.Pool(3, maxtasksperchild=10) + self.assertEqual(3, len(p._pool)) + origworkerpids = [w.pid for w in p._pool] + # Run many tasks so each worker gets replaced (hopefully) + results = [] + for i in range(100): + results.append(p.apply_async(sqr, (i, ))) + # Fetch the results and verify we got the right answers, + # also ensuring all the tasks have completed. + for (j, res) in enumerate(results): + self.assertEqual(res.get(), sqr(j)) + # Refill the pool + p._repopulate_pool() + # Wait until all workers are alive + # (countdown * DELTA = 5 seconds max startup process time) + countdown = 50 + while countdown and not all(w.is_alive() for w in p._pool): + countdown -= 1 + time.sleep(DELTA) + finalworkerpids = [w.pid for w in p._pool] + # All pids should be assigned. See issue #7805. + self.assertNotIn(None, origworkerpids) + self.assertNotIn(None, finalworkerpids) + # Finally, check that the worker pids have changed + self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) + p.close() + p.join() + + def test_pool_worker_lifetime_early_close(self): + # Issue #10332: closing a pool whose workers have limited lifetimes + # before all the tasks completed would make join() hang. 
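+        # With maxtasksperchild=1 each worker is replaced after a single task.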
+ p = multiprocessing.Pool(3, maxtasksperchild=1) + results = [] + for i in range(6): + results.append(p.apply_async(sqr, (i, 0.3))) + p.close() + p.join() + # check the results + for (j, res) in enumerate(results): + self.assertEqual(res.get(), sqr(j)) + + def test_worker_finalization_via_atexit_handler_of_multiprocessing(self): + # tests cases against bpo-38744 and bpo-39360 + cmd = '''if 1: + from multiprocessing import Pool + problem = None + class A: + def __init__(self): + self.pool = Pool(processes=1) + def test(): + global problem + problem = A() + problem.pool.map(float, tuple(range(10))) + if __name__ == "__main__": + test() + ''' + rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd) + self.assertEqual(rc, 0) + +# +# Test of creating a customized manager class +# + +from multiprocess.managers import BaseManager, BaseProxy, RemoteError + +class FooBar(object): + def f(self): + return 'f()' + def g(self): + raise ValueError + def _h(self): + return '_h()' + +def baz(): + for i in range(10): + yield i*i + +class IteratorProxy(BaseProxy): + _exposed_ = ('__next__',) + def __iter__(self): + return self + def __next__(self): + return self._callmethod('__next__') + +class MyManager(BaseManager): + pass + +MyManager.register('Foo', callable=FooBar) +MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) +MyManager.register('baz', callable=baz, proxytype=IteratorProxy) + + +class _TestMyManager(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + + def test_mymanager(self): + manager = MyManager() + manager.start() + self.common(manager) + manager.shutdown() + + # bpo-30356: BaseManager._finalize_manager() sends SIGTERM + # to the manager process if it takes longer than 1 second to stop, + # which happens on slow buildbots. + self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) + + def test_mymanager_context(self): + with MyManager() as manager: + self.common(manager) + # bpo-30356: BaseManager._finalize_manager() sends SIGTERM + # to the manager process if it takes longer than 1 second to stop, + # which happens on slow buildbots. 
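+        # Accept either a clean exit or termination by that SIGTERM.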
+ self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) + + def test_mymanager_context_prestarted(self): + manager = MyManager() + manager.start() + with manager: + self.common(manager) + self.assertEqual(manager._process.exitcode, 0) + + def common(self, manager): + foo = manager.Foo() + bar = manager.Bar() + baz = manager.baz() + + foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] + bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] + + self.assertEqual(foo_methods, ['f', 'g']) + self.assertEqual(bar_methods, ['f', '_h']) + + self.assertEqual(foo.f(), 'f()') + self.assertRaises(ValueError, foo.g) + self.assertEqual(foo._callmethod('f'), 'f()') + self.assertRaises(RemoteError, foo._callmethod, '_h') + + self.assertEqual(bar.f(), 'f()') + self.assertEqual(bar._h(), '_h()') + self.assertEqual(bar._callmethod('f'), 'f()') + self.assertEqual(bar._callmethod('_h'), '_h()') + + self.assertEqual(list(baz), [i*i for i in range(10)]) + + +# +# Test of connecting to a remote server and using xmlrpclib for serialization +# + +_queue = pyqueue.Queue() +def get_queue(): + return _queue + +class QueueManager(BaseManager): + '''manager class used by server process''' +QueueManager.register('get_queue', callable=get_queue) + +class QueueManager2(BaseManager): + '''manager class which specifies the same interface as QueueManager''' +QueueManager2.register('get_queue') + + +SERIALIZER = 'xmlrpclib' + +class _TestRemoteManager(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + values = ['hello world', None, True, 2.25, + 'hall\xe5 v\xe4rlden', + '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', + b'hall\xe5 v\xe4rlden', + ] + result = values[:] + + @classmethod + def _putter(cls, address, authkey): + manager = QueueManager2( + address=address, authkey=authkey, serializer=SERIALIZER + ) + manager.connect() + queue = manager.get_queue() + # Note that xmlrpclib will deserialize object as a list not a tuple + queue.put(tuple(cls.values)) + + def test_remote(self): + authkey = os.urandom(32) + + manager = QueueManager( + address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER + ) + manager.start() + self.addCleanup(manager.shutdown) + + p = self.Process(target=self._putter, args=(manager.address, authkey)) + p.daemon = True + p.start() + + manager2 = QueueManager2( + address=manager.address, authkey=authkey, serializer=SERIALIZER + ) + manager2.connect() + queue = manager2.get_queue() + + self.assertEqual(queue.get(), self.result) + + # Because we are using xmlrpclib for serialization instead of + # pickle this will cause a serialization error. + self.assertRaises(Exception, queue.put, time.sleep) + + # Make queue finalizer run before the server is stopped + del queue + + +@hashlib_helper.requires_hashdigest('md5') +class _TestManagerRestart(BaseTestCase): + + @classmethod + def _putter(cls, address, authkey): + manager = QueueManager( + address=address, authkey=authkey, serializer=SERIALIZER) + manager.connect() + queue = manager.get_queue() + queue.put('hello world') + + def test_rapid_restart(self): + authkey = os.urandom(32) + manager = QueueManager( + address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER) + try: + srvr = manager.get_server() + addr = srvr.address + # Close the connection.Listener socket which gets opened as a part + # of manager.get_server(). It's not needed for the test. 
+ srvr.listener.close() + manager.start() + + p = self.Process(target=self._putter, args=(manager.address, authkey)) + p.start() + p.join() + queue = manager.get_queue() + self.assertEqual(queue.get(), 'hello world') + del queue + finally: + if hasattr(manager, "shutdown"): + manager.shutdown() + + manager = QueueManager( + address=addr, authkey=authkey, serializer=SERIALIZER) + try: + manager.start() + self.addCleanup(manager.shutdown) + except OSError as e: + if e.errno != errno.EADDRINUSE: + raise + # Retry after some time, in case the old socket was lingering + # (sporadic failure on buildbots) + time.sleep(1.0) + manager = QueueManager( + address=addr, authkey=authkey, serializer=SERIALIZER) + if hasattr(manager, "shutdown"): + self.addCleanup(manager.shutdown) + +# +# +# + +SENTINEL = latin('') + +class _TestConnection(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + @classmethod + def _echo(cls, conn): + for msg in iter(conn.recv_bytes, SENTINEL): + conn.send_bytes(msg) + conn.close() + + def test_connection(self): + conn, child_conn = self.Pipe() + + p = self.Process(target=self._echo, args=(child_conn,)) + p.daemon = True + p.start() + + seq = [1, 2.25, None] + msg = latin('hello world') + longmsg = msg * 10 + arr = array.array('i', list(range(4))) + + if self.TYPE == 'processes': + self.assertEqual(type(conn.fileno()), int) + + self.assertEqual(conn.send(seq), None) + self.assertEqual(conn.recv(), seq) + + self.assertEqual(conn.send_bytes(msg), None) + self.assertEqual(conn.recv_bytes(), msg) + + if self.TYPE == 'processes': + buffer = array.array('i', [0]*10) + expected = list(arr) + [0] * (10 - len(arr)) + self.assertEqual(conn.send_bytes(arr), None) + self.assertEqual(conn.recv_bytes_into(buffer), + len(arr) * buffer.itemsize) + self.assertEqual(list(buffer), expected) + + buffer = array.array('i', [0]*10) + expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) + self.assertEqual(conn.send_bytes(arr), None) + self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), + len(arr) * buffer.itemsize) + self.assertEqual(list(buffer), expected) + + buffer = bytearray(latin(' ' * 40)) + self.assertEqual(conn.send_bytes(longmsg), None) + try: + res = conn.recv_bytes_into(buffer) + except multiprocessing.BufferTooShort as e: + self.assertEqual(e.args, (longmsg,)) + else: + self.fail('expected BufferTooShort, got %s' % res) + + poll = TimingWrapper(conn.poll) + + self.assertEqual(poll(), False) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(poll(-1), False) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(poll(TIMEOUT1), False) + self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) + + conn.send(None) + time.sleep(.1) + + self.assertEqual(poll(TIMEOUT1), True) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(conn.recv(), None) + + really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb + conn.send_bytes(really_big_msg) + self.assertEqual(conn.recv_bytes(), really_big_msg) + + conn.send_bytes(SENTINEL) # tell child to quit + child_conn.close() + + if self.TYPE == 'processes': + self.assertEqual(conn.readable, True) + self.assertEqual(conn.writable, True) + self.assertRaises(EOFError, conn.recv) + self.assertRaises(EOFError, conn.recv_bytes) + + p.join() + + def test_duplex_false(self): + reader, writer = self.Pipe(duplex=False) + self.assertEqual(writer.send(1), None) + self.assertEqual(reader.recv(), 1) + if self.TYPE == 'processes': + self.assertEqual(reader.readable, True) + 
self.assertEqual(reader.writable, False) + self.assertEqual(writer.readable, False) + self.assertEqual(writer.writable, True) + self.assertRaises(OSError, reader.send, 2) + self.assertRaises(OSError, writer.recv) + self.assertRaises(OSError, writer.poll) + + def test_spawn_close(self): + # We test that a pipe connection can be closed by parent + # process immediately after child is spawned. On Windows this + # would have sometimes failed on old versions because + # child_conn would be closed before the child got a chance to + # duplicate it. + conn, child_conn = self.Pipe() + + p = self.Process(target=self._echo, args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() # this might complete before child initializes + + msg = latin('hello') + conn.send_bytes(msg) + self.assertEqual(conn.recv_bytes(), msg) + + conn.send_bytes(SENTINEL) + conn.close() + p.join() + + def test_sendbytes(self): + if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + msg = latin('abcdefghijklmnopqrstuvwxyz') + a, b = self.Pipe() + + a.send_bytes(msg) + self.assertEqual(b.recv_bytes(), msg) + + a.send_bytes(msg, 5) + self.assertEqual(b.recv_bytes(), msg[5:]) + + a.send_bytes(msg, 7, 8) + self.assertEqual(b.recv_bytes(), msg[7:7+8]) + + a.send_bytes(msg, 26) + self.assertEqual(b.recv_bytes(), latin('')) + + a.send_bytes(msg, 26, 0) + self.assertEqual(b.recv_bytes(), latin('')) + + self.assertRaises(ValueError, a.send_bytes, msg, 27) + + self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) + + self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) + + self.assertRaises(ValueError, a.send_bytes, msg, -1) + + self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) + + @classmethod + def _is_fd_assigned(cls, fd): + try: + os.fstat(fd) + except OSError as e: + if e.errno == errno.EBADF: + return False + raise + else: + return True + + @classmethod + def _writefd(cls, conn, data, create_dummy_fds=False): + if create_dummy_fds: + for i in range(0, 256): + if not cls._is_fd_assigned(i): + os.dup2(conn.fileno(), i) + fd = reduction.recv_handle(conn) + if msvcrt: + fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) + os.write(fd, data) + os.close(fd) + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + def test_fd_transfer(self): + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._writefd, args=(child_conn, b"foo")) + p.daemon = True + p.start() + self.addCleanup(test.support.unlink, test.support.TESTFN) + with open(test.support.TESTFN, "wb") as f: + fd = f.fileno() + if msvcrt: + fd = msvcrt.get_osfhandle(fd) + reduction.send_handle(conn, fd, p.pid) + p.join() + with open(test.support.TESTFN, "rb") as f: + self.assertEqual(f.read(), b"foo") + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + @unittest.skipIf(sys.platform == "win32", + "test semantics don't make sense on Windows") + @unittest.skipIf(MAXFD <= 256, + "largest assignable fd number is too small") + @unittest.skipUnless(hasattr(os, "dup2"), + "test needs os.dup2()") + def test_large_fd_transfer(self): + # With fd > 256 (issue #11657) + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) + p.daemon = True + p.start() + self.addCleanup(test.support.unlink, test.support.TESTFN) + with 
open(test.support.TESTFN, "wb") as f: + fd = f.fileno() + for newfd in range(256, MAXFD): + if not self._is_fd_assigned(newfd): + break + else: + self.fail("could not find an unassigned large file descriptor") + os.dup2(fd, newfd) + try: + reduction.send_handle(conn, newfd, p.pid) + finally: + os.close(newfd) + p.join() + with open(test.support.TESTFN, "rb") as f: + self.assertEqual(f.read(), b"bar") + + @classmethod + def _send_data_without_fd(self, conn): + os.write(conn.fileno(), b"\0") + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") + def test_missing_fd_transfer(self): + # Check that exception is raised when received data is not + # accompanied by a file descriptor in ancillary data. + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) + p.daemon = True + p.start() + self.assertRaises(RuntimeError, reduction.recv_handle, conn) + p.join() + + def test_context(self): + a, b = self.Pipe() + + with a, b: + a.send(1729) + self.assertEqual(b.recv(), 1729) + if self.TYPE == 'processes': + self.assertFalse(a.closed) + self.assertFalse(b.closed) + + if self.TYPE == 'processes': + self.assertTrue(a.closed) + self.assertTrue(b.closed) + self.assertRaises(OSError, a.recv) + self.assertRaises(OSError, b.recv) + +class _TestListener(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_multiple_bind(self): + for family in self.connection.families: + l = self.connection.Listener(family=family) + self.addCleanup(l.close) + self.assertRaises(OSError, self.connection.Listener, + l.address, family) + + def test_context(self): + with self.connection.Listener() as l: + with self.connection.Client(l.address) as c: + with l.accept() as d: + c.send(1729) + self.assertEqual(d.recv(), 1729) + + if self.TYPE == 'processes': + self.assertRaises(OSError, l.accept) + + @unittest.skipUnless(util.abstract_sockets_supported, + "test needs abstract socket support") + def test_abstract_socket(self): + with self.connection.Listener("\0something") as listener: + with self.connection.Client(listener.address) as client: + with listener.accept() as d: + client.send(1729) + self.assertEqual(d.recv(), 1729) + + if self.TYPE == 'processes': + self.assertRaises(OSError, listener.accept) + + +class _TestListenerClient(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + @classmethod + def _test(cls, address): + conn = cls.connection.Client(address) + conn.send('hello') + conn.close() + + def test_listener_client(self): + for family in self.connection.families: + l = self.connection.Listener(family=family) + p = self.Process(target=self._test, args=(l.address,)) + p.daemon = True + p.start() + conn = l.accept() + self.assertEqual(conn.recv(), 'hello') + p.join() + l.close() + + def test_issue14725(self): + l = self.connection.Listener() + p = self.Process(target=self._test, args=(l.address,)) + p.daemon = True + p.start() + time.sleep(1) + # On Windows the client process should by now have connected, + # written data and closed the pipe handle by now. This causes + # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue + # 14725. 
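+        # accept() should still succeed and the already-written message
+        # should be received below.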
+ conn = l.accept() + self.assertEqual(conn.recv(), 'hello') + conn.close() + p.join() + l.close() + + def test_issue16955(self): + for fam in self.connection.families: + l = self.connection.Listener(family=fam) + c = self.connection.Client(l.address) + a = l.accept() + a.send_bytes(b"hello") + self.assertTrue(c.poll(1)) + a.close() + c.close() + l.close() + +class _TestPoll(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + def test_empty_string(self): + a, b = self.Pipe() + self.assertEqual(a.poll(), False) + b.send_bytes(b'') + self.assertEqual(a.poll(), True) + self.assertEqual(a.poll(), True) + + @classmethod + def _child_strings(cls, conn, strings): + for s in strings: + time.sleep(0.1) + conn.send_bytes(s) + conn.close() + + def test_strings(self): + strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') + a, b = self.Pipe() + p = self.Process(target=self._child_strings, args=(b, strings)) + p.start() + + for s in strings: + for i in range(200): + if a.poll(0.01): + break + x = a.recv_bytes() + self.assertEqual(s, x) + + p.join() + + @classmethod + def _child_boundaries(cls, r): + # Polling may "pull" a message in to the child process, but we + # don't want it to pull only part of a message, as that would + # corrupt the pipe for any other processes which might later + # read from it. + r.poll(5) + + def test_boundaries(self): + r, w = self.Pipe(False) + p = self.Process(target=self._child_boundaries, args=(r,)) + p.start() + time.sleep(2) + L = [b"first", b"second"] + for obj in L: + w.send_bytes(obj) + w.close() + p.join() + self.assertIn(r.recv_bytes(), L) + + @classmethod + def _child_dont_merge(cls, b): + b.send_bytes(b'a') + b.send_bytes(b'b') + b.send_bytes(b'cd') + + def test_dont_merge(self): + a, b = self.Pipe() + self.assertEqual(a.poll(0.0), False) + self.assertEqual(a.poll(0.1), False) + + p = self.Process(target=self._child_dont_merge, args=(b,)) + p.start() + + self.assertEqual(a.recv_bytes(), b'a') + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.recv_bytes(), b'b') + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(0.0), True) + self.assertEqual(a.recv_bytes(), b'cd') + + p.join() + +# +# Test of sending connection and socket objects between processes +# + +@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") +@hashlib_helper.requires_hashdigest('md5') +class _TestPicklingConnections(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def tearDownClass(cls): + from multiprocess import resource_sharer + resource_sharer.stop(timeout=support.LONG_TIMEOUT) + + @classmethod + def _listener(cls, conn, families): + for fam in families: + l = cls.connection.Listener(family=fam) + conn.send(l.address) + new_conn = l.accept() + conn.send(new_conn) + new_conn.close() + l.close() + + l = socket.create_server((socket_helper.HOST, 0)) + conn.send(l.getsockname()) + new_conn, addr = l.accept() + conn.send(new_conn) + new_conn.close() + l.close() + + conn.recv() + + @classmethod + def _remote(cls, conn): + for (address, msg) in iter(conn.recv, None): + client = cls.connection.Client(address) + client.send(msg.upper()) + client.close() + + address, msg = conn.recv() + client = socket.socket() + client.connect(address) + client.sendall(msg.upper()) + client.close() + + conn.close() + + def test_pickling(self): + families = self.connection.families + + lconn, lconn0 = self.Pipe() + lp = self.Process(target=self._listener, 
args=(lconn0, families)) + lp.daemon = True + lp.start() + lconn0.close() + + rconn, rconn0 = self.Pipe() + rp = self.Process(target=self._remote, args=(rconn0,)) + rp.daemon = True + rp.start() + rconn0.close() + + for fam in families: + msg = ('This connection uses family %s' % fam).encode('ascii') + address = lconn.recv() + rconn.send((address, msg)) + new_conn = lconn.recv() + self.assertEqual(new_conn.recv(), msg.upper()) + + rconn.send(None) + + msg = latin('This connection uses a normal socket') + address = lconn.recv() + rconn.send((address, msg)) + new_conn = lconn.recv() + buf = [] + while True: + s = new_conn.recv(100) + if not s: + break + buf.append(s) + buf = b''.join(buf) + self.assertEqual(buf, msg.upper()) + new_conn.close() + + lconn.send(None) + + rconn.close() + lconn.close() + + lp.join() + rp.join() + + @classmethod + def child_access(cls, conn): + w = conn.recv() + w.send('all is well') + w.close() + + r = conn.recv() + msg = r.recv() + conn.send(msg*2) + + conn.close() + + def test_access(self): + # On Windows, if we do not specify a destination pid when + # using DupHandle then we need to be careful to use the + # correct access flags for DuplicateHandle(), or else + # DupHandle.detach() will raise PermissionError. For example, + # for a read only pipe handle we should use + # access=FILE_GENERIC_READ. (Unfortunately + # DUPLICATE_SAME_ACCESS does not work.) + conn, child_conn = self.Pipe() + p = self.Process(target=self.child_access, args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() + + r, w = self.Pipe(duplex=False) + conn.send(w) + w.close() + self.assertEqual(r.recv(), 'all is well') + r.close() + + r, w = self.Pipe(duplex=False) + conn.send(r) + r.close() + w.send('foobar') + w.close() + self.assertEqual(conn.recv(), 'foobar'*2) + + p.join() + +# +# +# + +class _TestHeap(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def setUp(self): + super().setUp() + # Make pristine heap for these tests + self.old_heap = multiprocessing.heap.BufferWrapper._heap + multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() + + def tearDown(self): + multiprocessing.heap.BufferWrapper._heap = self.old_heap + super().tearDown() + + def test_heap(self): + iterations = 5000 + maxblocks = 50 + blocks = [] + + # get the heap object + heap = multiprocessing.heap.BufferWrapper._heap + heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 + + # create and destroy lots of blocks of different sizes + for i in range(iterations): + size = int(random.lognormvariate(0, 1) * 1000) + b = multiprocessing.heap.BufferWrapper(size) + blocks.append(b) + if len(blocks) > maxblocks: + i = random.randrange(maxblocks) + del blocks[i] + del b + + # verify the state of the heap + with heap._lock: + all = [] + free = 0 + occupied = 0 + for L in list(heap._len_to_seq.values()): + # count all free blocks in arenas + for arena, start, stop in L: + all.append((heap._arenas.index(arena), start, stop, + stop-start, 'free')) + free += (stop-start) + for arena, arena_blocks in heap._allocated_blocks.items(): + # count all allocated blocks in arenas + for start, stop in arena_blocks: + all.append((heap._arenas.index(arena), start, stop, + stop-start, 'occupied')) + occupied += (stop-start) + + self.assertEqual(free + occupied, + sum(arena.size for arena in heap._arenas)) + + all.sort() + + for i in range(len(all)-1): + (arena, start, stop) = all[i][:3] + (narena, nstart, nstop) = all[i+1][:3] + if arena != narena: + # Two different arenas + self.assertEqual(stop, 
heap._arenas[arena].size) # last block + self.assertEqual(nstart, 0) # first block + else: + # Same arena: two adjacent blocks + self.assertEqual(stop, nstart) + + # test free'ing all blocks + random.shuffle(blocks) + while blocks: + blocks.pop() + + self.assertEqual(heap._n_frees, heap._n_mallocs) + self.assertEqual(len(heap._pending_free_blocks), 0) + self.assertEqual(len(heap._arenas), 0) + self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) + self.assertEqual(len(heap._len_to_seq), 0) + + def test_free_from_gc(self): + # Check that freeing of blocks by the garbage collector doesn't deadlock + # (issue #12352). + # Make sure the GC is enabled, and set lower collection thresholds to + # make collections more frequent (and increase the probability of + # deadlock). + if not gc.isenabled(): + gc.enable() + self.addCleanup(gc.disable) + thresholds = gc.get_threshold() + self.addCleanup(gc.set_threshold, *thresholds) + gc.set_threshold(10) + + # perform numerous block allocations, with cyclic references to make + # sure objects are collected asynchronously by the gc + for i in range(5000): + a = multiprocessing.heap.BufferWrapper(1) + b = multiprocessing.heap.BufferWrapper(1) + # circular references + a.buddy = b + b.buddy = a + +# +# +# + +class _Foo(Structure): + _fields_ = [ + ('x', c_int), + ('y', c_double), + ('z', c_longlong,) + ] + +class _TestSharedCTypes(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def setUp(self): + if not HAS_SHAREDCTYPES: + self.skipTest("requires multiprocess.sharedctypes") + + @classmethod + def _double(cls, x, y, z, foo, arr, string): + x.value *= 2 + y.value *= 2 + z.value *= 2 + foo.x *= 2 + foo.y *= 2 + string.value *= 2 + for i in range(len(arr)): + arr[i] *= 2 + + def test_sharedctypes(self, lock=False): + x = Value('i', 7, lock=lock) + y = Value(c_double, 1.0/3.0, lock=lock) + z = Value(c_longlong, 2 ** 33, lock=lock) + foo = Value(_Foo, 3, 2, lock=lock) + arr = self.Array('d', list(range(10)), lock=lock) + string = self.Array('c', 20, lock=lock) + string.value = latin('hello') + + p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) + p.daemon = True + p.start() + p.join() + + self.assertEqual(x.value, 14) + self.assertAlmostEqual(y.value, 2.0/3.0) + self.assertEqual(z.value, 2 ** 34) + self.assertEqual(foo.x, 6) + self.assertAlmostEqual(foo.y, 4.0) + for i in range(10): + self.assertAlmostEqual(arr[i], i*2) + self.assertEqual(string.value, latin('hellohello')) + + def test_synchronize(self): + self.test_sharedctypes(lock=True) + + def test_copy(self): + foo = _Foo(2, 5.0, 2 ** 33) + bar = copy(foo) + foo.x = 0 + foo.y = 0 + foo.z = 0 + self.assertEqual(bar.x, 2) + self.assertAlmostEqual(bar.y, 5.0) + self.assertEqual(bar.z, 2 ** 33) + + +@unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") +@hashlib_helper.requires_hashdigest('md5') +class _TestSharedMemory(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @staticmethod + def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): + if isinstance(shmem_name_or_obj, str): + local_sms = shared_memory.SharedMemory(shmem_name_or_obj) + else: + local_sms = shmem_name_or_obj + local_sms.buf[:len(binary_data)] = binary_data + local_sms.close() + + def test_shared_memory_basics(self): + sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512) + self.addCleanup(sms.unlink) + + # Verify attributes are readable. 
+ self.assertEqual(sms.name, 'test01_tsmb') + self.assertGreaterEqual(sms.size, 512) + self.assertGreaterEqual(len(sms.buf), sms.size) + + # Modify contents of shared memory segment through memoryview. + sms.buf[0] = 42 + self.assertEqual(sms.buf[0], 42) + + # Attach to existing shared memory segment. + also_sms = shared_memory.SharedMemory('test01_tsmb') + self.assertEqual(also_sms.buf[0], 42) + also_sms.close() + + # Attach to existing shared memory segment but specify a new size. + same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size) + self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. + same_sms.close() + + # Creating Shared Memory Segment with -ve size + with self.assertRaises(ValueError): + shared_memory.SharedMemory(create=True, size=-2) + + # Attaching Shared Memory Segment without a name + with self.assertRaises(ValueError): + shared_memory.SharedMemory(create=False) + + # Test if shared memory segment is created properly, + # when _make_filename returns an existing shared memory segment name + with unittest.mock.patch( + 'multiprocessing.shared_memory._make_filename') as mock_make_filename: + + NAME_PREFIX = shared_memory._SHM_NAME_PREFIX + names = ['test01_fn', 'test02_fn'] + # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary + # because some POSIX compliant systems require name to start with / + names = [NAME_PREFIX + name for name in names] + + mock_make_filename.side_effect = names + shm1 = shared_memory.SharedMemory(create=True, size=1) + self.addCleanup(shm1.unlink) + self.assertEqual(shm1._name, names[0]) + + mock_make_filename.side_effect = names + shm2 = shared_memory.SharedMemory(create=True, size=1) + self.addCleanup(shm2.unlink) + self.assertEqual(shm2._name, names[1]) + + if shared_memory._USE_POSIX: + # Posix Shared Memory can only be unlinked once. Here we + # test an implementation detail that is not observed across + # all supported platforms (since WindowsNamedSharedMemory + # manages unlinking on its own and unlink() does nothing). + # True release of shared memory segment does not necessarily + # happen until process exits, depending on the OS platform. + with self.assertRaises(FileNotFoundError): + sms_uno = shared_memory.SharedMemory( + 'test01_dblunlink', + create=True, + size=5000 + ) + + try: + self.assertGreaterEqual(sms_uno.size, 5000) + + sms_duo = shared_memory.SharedMemory('test01_dblunlink') + sms_duo.unlink() # First shm_unlink() call. + sms_duo.close() + sms_uno.close() + + finally: + sms_uno.unlink() # A second shm_unlink() call is bad. + + with self.assertRaises(FileExistsError): + # Attempting to create a new shared memory segment with a + # name that is already in use triggers an exception. + there_can_only_be_one_sms = shared_memory.SharedMemory( + 'test01_tsmb', + create=True, + size=512 + ) + + if shared_memory._USE_POSIX: + # Requesting creation of a shared memory segment with the option + # to attach to an existing segment, if that name is currently in + # use, should not trigger an exception. + # Note: Using a smaller size could possibly cause truncation of + # the existing segment but is OS platform dependent. In the + # case of MacOS/darwin, requesting a smaller size is disallowed. 
+ class OptionalAttachSharedMemory(shared_memory.SharedMemory): + _flags = os.O_CREAT | os.O_RDWR + ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb') + self.assertEqual(ok_if_exists_sms.size, sms.size) + ok_if_exists_sms.close() + + # Attempting to attach to an existing shared memory segment when + # no segment exists with the supplied name triggers an exception. + with self.assertRaises(FileNotFoundError): + nonexisting_sms = shared_memory.SharedMemory('test01_notthere') + nonexisting_sms.unlink() # Error should occur on prior line. + + sms.close() + + # Test creating a shared memory segment with negative size + with self.assertRaises(ValueError): + sms_invalid = shared_memory.SharedMemory(create=True, size=-1) + + # Test creating a shared memory segment with size 0 + with self.assertRaises(ValueError): + sms_invalid = shared_memory.SharedMemory(create=True, size=0) + + # Test creating a shared memory segment without size argument + with self.assertRaises(ValueError): + sms_invalid = shared_memory.SharedMemory(create=True) + + def test_shared_memory_across_processes(self): + # bpo-40135: don't define shared memory block's name in case of + # the failure when we run multiprocessing tests in parallel. + sms = shared_memory.SharedMemory(create=True, size=512) + self.addCleanup(sms.unlink) + + # Verify remote attachment to existing block by name is working. + p = self.Process( + target=self._attach_existing_shmem_then_write, + args=(sms.name, b'howdy') + ) + p.daemon = True + p.start() + p.join() + self.assertEqual(bytes(sms.buf[:5]), b'howdy') + + # Verify pickling of SharedMemory instance also works. + p = self.Process( + target=self._attach_existing_shmem_then_write, + args=(sms, b'HELLO') + ) + p.daemon = True + p.start() + p.join() + self.assertEqual(bytes(sms.buf[:5]), b'HELLO') + + sms.close() + + @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") + def test_shared_memory_SharedMemoryServer_ignores_sigint(self): + # bpo-36368: protect SharedMemoryManager server process from + # KeyboardInterrupt signals. + smm = multiprocessing.managers.SharedMemoryManager() + smm.start() + + # make sure the manager works properly at the beginning + sl = smm.ShareableList(range(10)) + + # the manager's server should ignore KeyboardInterrupt signals, and + # maintain its connection with the current process, and success when + # asked to deliver memory segments. + os.kill(smm._process.pid, signal.SIGINT) + + sl2 = smm.ShareableList(range(10)) + + # test that the custom signal handler registered in the Manager does + # not affect signal handling in the parent process. + with self.assertRaises(KeyboardInterrupt): + os.kill(os.getpid(), signal.SIGINT) + + smm.shutdown() + + @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") + def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): + # bpo-36867: test that a SharedMemoryManager uses the + # same resource_tracker process as its parent. + cmd = '''if 1: + from multiprocessing.managers import SharedMemoryManager + + + smm = SharedMemoryManager() + smm.start() + sl = smm.ShareableList(range(10)) + smm.shutdown() + ''' + rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd) + + # Before bpo-36867 was fixed, a SharedMemoryManager not using the same + # resource_tracker process as its parent would make the parent's + # tracker complain about sl being leaked even though smm.shutdown() + # properly released sl. 
+ self.assertFalse(err) + + def test_shared_memory_SharedMemoryManager_basics(self): + smm1 = multiprocessing.managers.SharedMemoryManager() + with self.assertRaises(ValueError): + smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started + smm1.start() + lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] + lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] + doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) + self.assertEqual(len(doppleganger_list0), 5) + doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) + self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) + held_name = lom[0].name + smm1.shutdown() + if sys.platform != "win32": + # Calls to unlink() have no effect on Windows platform; shared + # memory will only be released once final process exits. + with self.assertRaises(FileNotFoundError): + # No longer there to be attached to again. + absent_shm = shared_memory.SharedMemory(name=held_name) + + with multiprocessing.managers.SharedMemoryManager() as smm2: + sl = smm2.ShareableList("howdy") + shm = smm2.SharedMemory(size=128) + held_name = sl.shm.name + if sys.platform != "win32": + with self.assertRaises(FileNotFoundError): + # No longer there to be attached to again. + absent_sl = shared_memory.ShareableList(name=held_name) + + + def test_shared_memory_ShareableList_basics(self): + sl = shared_memory.ShareableList( + ['howdy', b'HoWdY', -273.154, 100, None, True, 42] + ) + self.addCleanup(sl.shm.unlink) + + # Verify attributes are readable. + self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') + + # Exercise len(). + self.assertEqual(len(sl), 7) + + # Exercise index(). + with warnings.catch_warnings(): + # Suppress BytesWarning when comparing against b'HoWdY'. + warnings.simplefilter('ignore') + with self.assertRaises(ValueError): + sl.index('100') + self.assertEqual(sl.index(100), 3) + + # Exercise retrieving individual values. + self.assertEqual(sl[0], 'howdy') + self.assertEqual(sl[-2], True) + + # Exercise iterability. + self.assertEqual( + tuple(sl), + ('howdy', b'HoWdY', -273.154, 100, None, True, 42) + ) + + # Exercise modifying individual values. + sl[3] = 42 + self.assertEqual(sl[3], 42) + sl[4] = 'some' # Change type at a given position. + self.assertEqual(sl[4], 'some') + self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') + with self.assertRaisesRegex(ValueError, + "exceeds available storage"): + sl[4] = 'far too many' + self.assertEqual(sl[4], 'some') + sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data + self.assertEqual(sl[0], 'encodés') + self.assertEqual(sl[1], b'HoWdY') # no spillage + with self.assertRaisesRegex(ValueError, + "exceeds available storage"): + sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data + self.assertEqual(sl[1], b'HoWdY') + with self.assertRaisesRegex(ValueError, + "exceeds available storage"): + sl[1] = b'123456789' + self.assertEqual(sl[1], b'HoWdY') + + # Exercise count(). + with warnings.catch_warnings(): + # Suppress BytesWarning when comparing against b'HoWdY'. + warnings.simplefilter('ignore') + self.assertEqual(sl.count(42), 2) + self.assertEqual(sl.count(b'HoWdY'), 1) + self.assertEqual(sl.count(b'adios'), 0) + + # Exercise creating a duplicate. 
+ sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate') + try: + self.assertNotEqual(sl.shm.name, sl_copy.shm.name) + self.assertEqual('test03_duplicate', sl_copy.shm.name) + self.assertEqual(list(sl), list(sl_copy)) + self.assertEqual(sl.format, sl_copy.format) + sl_copy[-1] = 77 + self.assertEqual(sl_copy[-1], 77) + self.assertNotEqual(sl[-1], 77) + sl_copy.shm.close() + finally: + sl_copy.shm.unlink() + + # Obtain a second handle on the same ShareableList. + sl_tethered = shared_memory.ShareableList(name=sl.shm.name) + self.assertEqual(sl.shm.name, sl_tethered.shm.name) + sl_tethered[-1] = 880 + self.assertEqual(sl[-1], 880) + sl_tethered.shm.close() + + sl.shm.close() + + # Exercise creating an empty ShareableList. + empty_sl = shared_memory.ShareableList() + try: + self.assertEqual(len(empty_sl), 0) + self.assertEqual(empty_sl.format, '') + self.assertEqual(empty_sl.count('any'), 0) + with self.assertRaises(ValueError): + empty_sl.index(None) + empty_sl.shm.close() + finally: + empty_sl.shm.unlink() + + def test_shared_memory_ShareableList_pickling(self): + sl = shared_memory.ShareableList(range(10)) + self.addCleanup(sl.shm.unlink) + + serialized_sl = pickle.dumps(sl) + deserialized_sl = pickle.loads(serialized_sl) + self.assertTrue( + isinstance(deserialized_sl, shared_memory.ShareableList) + ) + self.assertTrue(deserialized_sl[-1], 9) + self.assertFalse(sl is deserialized_sl) + deserialized_sl[4] = "changed" + self.assertEqual(sl[4], "changed") + + # Verify data is not being put into the pickled representation. + name = 'a' * len(sl.shm.name) + larger_sl = shared_memory.ShareableList(range(400)) + self.addCleanup(larger_sl.shm.unlink) + serialized_larger_sl = pickle.dumps(larger_sl) + self.assertTrue(len(serialized_sl) == len(serialized_larger_sl)) + larger_sl.shm.close() + + deserialized_sl.shm.close() + sl.shm.close() + + def test_shared_memory_cleaned_after_process_termination(self): + cmd = '''if 1: + import os, time, sys + from multiprocessing import shared_memory + + # Create a shared_memory segment, and send the segment name + sm = shared_memory.SharedMemory(create=True, size=10) + sys.stdout.write(sm.name + '\\n') + sys.stdout.flush() + time.sleep(100) + ''' + with subprocess.Popen([sys.executable, '-E', '-c', cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) as p: + name = p.stdout.readline().strip().decode() + + # killing abruptly processes holding reference to a shared memory + # segment should not leak the given memory segment. + p.terminate() + p.wait() + + deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT + t = 0.1 + while getattr(time,'monotonic',time.time)() < deadline: + time.sleep(t) + t = min(t*2, 5) + try: + smm = shared_memory.SharedMemory(name, create=False) + except FileNotFoundError: + break + else: + raise AssertionError("A SharedMemory segment was leaked after" + " a process was abruptly terminated.") + + if os.name == 'posix': + # A warning was emitted by the subprocess' own + # resource_tracker (on Windows, shared memory segments + # are released automatically by the OS). 
+ err = p.stderr.read().decode() + self.assertIn( + "resource_tracker: There appear to be 1 leaked " + "shared_memory objects to clean up at shutdown", err) + +# +# +# + +class _TestFinalize(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def setUp(self): + self.registry_backup = util._finalizer_registry.copy() + util._finalizer_registry.clear() + + def tearDown(self): + self.assertFalse(util._finalizer_registry) + util._finalizer_registry.update(self.registry_backup) + + @classmethod + def _test_finalize(cls, conn): + class Foo(object): + pass + + a = Foo() + util.Finalize(a, conn.send, args=('a',)) + del a # triggers callback for a + + b = Foo() + close_b = util.Finalize(b, conn.send, args=('b',)) + close_b() # triggers callback for b + close_b() # does nothing because callback has already been called + del b # does nothing because callback has already been called + + c = Foo() + util.Finalize(c, conn.send, args=('c',)) + + d10 = Foo() + util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) + + d01 = Foo() + util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) + d02 = Foo() + util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) + d03 = Foo() + util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) + + util.Finalize(None, conn.send, args=('e',), exitpriority=-10) + + util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) + + # call multiprocessing's cleanup function then exit process without + # garbage collecting locals + util._exit_function() + conn.close() + os._exit(0) + + def test_finalize(self): + conn, child_conn = self.Pipe() + + p = self.Process(target=self._test_finalize, args=(child_conn,)) + p.daemon = True + p.start() + p.join() + + result = [obj for obj in iter(conn.recv, 'STOP')] + self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) + + def test_thread_safety(self): + # bpo-24484: _run_finalizers() should be thread-safe + def cb(): + pass + + class Foo(object): + def __init__(self): + self.ref = self # create reference cycle + # insert finalizer at random key + util.Finalize(self, cb, exitpriority=random.randint(1, 100)) + + finish = False + exc = None + + def run_finalizers(): + nonlocal exc + while not finish: + time.sleep(random.random() * 1e-1) + try: + # A GC run will eventually happen during this, + # collecting stale Foo's and mutating the registry + util._run_finalizers() + except Exception as e: + exc = e + + def make_finalizers(): + nonlocal exc + d = {} + while not finish: + try: + # Old Foo's get gradually replaced and later + # collected by the GC (because of the cyclic ref) + d[random.getrandbits(5)] = {Foo() for i in range(10)} + except Exception as e: + exc = e + d.clear() + + old_interval = sys.getswitchinterval() + old_threshold = gc.get_threshold() + try: + sys.setswitchinterval(1e-6) + gc.set_threshold(5, 5, 5) + threads = [threading.Thread(target=run_finalizers), + threading.Thread(target=make_finalizers)] + with test.support.start_threads(threads): + time.sleep(4.0) # Wait a bit to trigger race condition + finish = True + if exc is not None: + raise exc + finally: + sys.setswitchinterval(old_interval) + gc.set_threshold(*old_threshold) + gc.collect() # Collect remaining Foo's + + +# +# Test that from ... 
import * works for each module +# + +class _TestImportStar(unittest.TestCase): + + def get_module_names(self): + import glob + folder = os.path.dirname(multiprocessing.__file__) + pattern = os.path.join(glob.escape(folder), '*.py') + files = glob.glob(pattern) + modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] + modules = ['multiprocess.' + m for m in modules] + modules.remove('multiprocess.__init__') + modules.append('multiprocess') + return modules + + def test_import(self): + modules = self.get_module_names() + if sys.platform == 'win32': + modules.remove('multiprocess.popen_fork') + modules.remove('multiprocess.popen_forkserver') + modules.remove('multiprocess.popen_spawn_posix') + else: + modules.remove('multiprocess.popen_spawn_win32') + if not HAS_REDUCTION: + modules.remove('multiprocess.popen_forkserver') + + if c_int is None: + # This module requires _ctypes + modules.remove('multiprocess.sharedctypes') + + for name in modules: + __import__(name) + mod = sys.modules[name] + self.assertTrue(hasattr(mod, '__all__'), name) + + for attr in mod.__all__: + self.assertTrue( + hasattr(mod, attr), + '%r does not have attribute %r' % (mod, attr) + ) + +# +# Quick test that logging works -- does not test logging output +# + +class _TestLogging(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_enable_logging(self): + logger = multiprocessing.get_logger() + logger.setLevel(util.SUBWARNING) + self.assertTrue(logger is not None) + logger.debug('this will not be printed') + logger.info('nor will this') + logger.setLevel(LOG_LEVEL) + + @classmethod + def _test_level(cls, conn): + logger = multiprocessing.get_logger() + conn.send(logger.getEffectiveLevel()) + + def test_level(self): + LEVEL1 = 32 + LEVEL2 = 37 + + logger = multiprocessing.get_logger() + root_logger = logging.getLogger() + root_level = root_logger.level + + reader, writer = multiprocessing.Pipe(duplex=False) + + logger.setLevel(LEVEL1) + p = self.Process(target=self._test_level, args=(writer,)) + p.start() + self.assertEqual(LEVEL1, reader.recv()) + p.join() + p.close() + + logger.setLevel(logging.NOTSET) + root_logger.setLevel(LEVEL2) + p = self.Process(target=self._test_level, args=(writer,)) + p.start() + self.assertEqual(LEVEL2, reader.recv()) + p.join() + p.close() + + root_logger.setLevel(root_level) + logger.setLevel(level=LOG_LEVEL) + + +# class _TestLoggingProcessName(BaseTestCase): +# +# def handle(self, record): +# assert record.processName == multiprocessing.current_process().name +# self.__handled = True +# +# def test_logging(self): +# handler = logging.Handler() +# handler.handle = self.handle +# self.__handled = False +# # Bypass getLogger() and side-effects +# logger = logging.getLoggerClass()( +# 'multiprocessing.test.TestLoggingProcessName') +# logger.addHandler(handler) +# logger.propagate = False +# +# logger.warn('foo') +# assert self.__handled + +# +# Check that Process.join() retries if os.waitpid() fails with EINTR +# + +class _TestPollEintr(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def _killer(cls, pid): + time.sleep(0.1) + os.kill(pid, signal.SIGUSR1) + + @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') + def test_poll_eintr(self): + got_signal = [False] + def record(*args): + got_signal[0] = True + pid = os.getpid() + oldhandler = signal.signal(signal.SIGUSR1, record) + try: + killer = self.Process(target=self._killer, args=(pid,)) + killer.start() + try: + p = self.Process(target=time.sleep, args=(2,)) + p.start() + p.join() 
+ finally: + killer.join() + self.assertTrue(got_signal[0]) + self.assertEqual(p.exitcode, 0) + finally: + signal.signal(signal.SIGUSR1, oldhandler) + +# +# Test to verify handle verification, see issue 3321 +# + +class TestInvalidHandle(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_handles(self): + conn = multiprocessing.connection.Connection(44977608) + # check that poll() doesn't crash + try: + conn.poll() + except (ValueError, OSError): + pass + finally: + # Hack private attribute _handle to avoid printing an error + # in conn.__del__ + conn._handle = None + self.assertRaises((ValueError, OSError), + multiprocessing.connection.Connection, -1) + + + +@hashlib_helper.requires_hashdigest('md5') +class OtherTest(unittest.TestCase): + # TODO: add more tests for deliver/answer challenge. + def test_deliver_challenge_auth_failure(self): + class _FakeConnection(object): + def recv_bytes(self, size): + return b'something bogus' + def send_bytes(self, data): + pass + self.assertRaises(multiprocessing.AuthenticationError, + multiprocessing.connection.deliver_challenge, + _FakeConnection(), b'abc') + + def test_answer_challenge_auth_failure(self): + class _FakeConnection(object): + def __init__(self): + self.count = 0 + def recv_bytes(self, size): + self.count += 1 + if self.count == 1: + return multiprocessing.connection.CHALLENGE + elif self.count == 2: + return b'something bogus' + return b'' + def send_bytes(self, data): + pass + self.assertRaises(multiprocessing.AuthenticationError, + multiprocessing.connection.answer_challenge, + _FakeConnection(), b'abc') + +# +# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 +# + +def initializer(ns): + ns.test += 1 + +@hashlib_helper.requires_hashdigest('md5') +class TestInitializers(unittest.TestCase): + def setUp(self): + self.mgr = multiprocessing.Manager() + self.ns = self.mgr.Namespace() + self.ns.test = 0 + + def tearDown(self): + self.mgr.shutdown() + self.mgr.join() + + def test_manager_initializer(self): + m = multiprocessing.managers.SyncManager() + self.assertRaises(TypeError, m.start, 1) + m.start(initializer, (self.ns,)) + self.assertEqual(self.ns.test, 1) + m.shutdown() + m.join() + + def test_pool_initializer(self): + self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) + p = multiprocessing.Pool(1, initializer, (self.ns,)) + p.close() + p.join() + self.assertEqual(self.ns.test, 1) + +# +# Issue 5155, 5313, 5331: Test process in processes +# Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior +# + +def _this_sub_process(q): + try: + item = q.get(block=False) + except pyqueue.Empty: + pass + +def _test_process(): + queue = multiprocessing.Queue() + subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) + subProc.daemon = True + subProc.start() + subProc.join() + +def _afunc(x): + return x*x + +def pool_in_process(): + pool = multiprocessing.Pool(processes=4) + x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) + pool.close() + pool.join() + +class _file_like(object): + def __init__(self, delegate): + self._delegate = delegate + self._pid = None + + @property + def cache(self): + pid = os.getpid() + # There are no race conditions since fork keeps only the running thread + if pid != self._pid: + self._pid = pid + self._cache = [] + return self._cache + + def write(self, data): + self.cache.append(data) + + def flush(self): + self._delegate.write(''.join(self.cache)) + self._cache = [] + +class TestStdinBadfiledescriptor(unittest.TestCase): + + def test_queue_in_process(self): + proc = multiprocessing.Process(target=_test_process) + proc.start() + proc.join() + + def test_pool_in_process(self): + p = multiprocessing.Process(target=pool_in_process) + p.start() + p.join() + + def test_flushing(self): + sio = io.StringIO() + flike = _file_like(sio) + flike.write('foo') + proc = multiprocessing.Process(target=lambda: flike.flush()) + flike.flush() + assert sio.getvalue() == 'foo' + + +class TestWait(unittest.TestCase): + + @classmethod + def _child_test_wait(cls, w, slow): + for i in range(10): + if slow: + time.sleep(random.random()*0.1) + w.send((i, os.getpid())) + w.close() + + def test_wait(self, slow=False): + from multiprocess.connection import wait + readers = [] + procs = [] + messages = [] + + for i in range(4): + r, w = multiprocessing.Pipe(duplex=False) + p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) + p.daemon = True + p.start() + w.close() + readers.append(r) + procs.append(p) + self.addCleanup(p.join) + + while readers: + for r in wait(readers): + try: + msg = r.recv() + except EOFError: + readers.remove(r) + r.close() + else: + messages.append(msg) + + messages.sort() + expected = sorted((i, p.pid) for i in range(10) for p in procs) + self.assertEqual(messages, expected) + + @classmethod + def _child_test_wait_socket(cls, address, slow): + s = socket.socket() + s.connect(address) + for i in range(10): + if slow: + time.sleep(random.random()*0.1) + s.sendall(('%s\n' % i).encode('ascii')) + s.close() + + def test_wait_socket(self, slow=False): + from multiprocess.connection import wait + l = socket.create_server((socket_helper.HOST, 0)) + addr = l.getsockname() + readers = [] + procs = [] + dic = {} + + for i in range(4): + p = multiprocessing.Process(target=self._child_test_wait_socket, + args=(addr, slow)) + p.daemon = True + p.start() + procs.append(p) + self.addCleanup(p.join) + + for i in range(4): + r, _ = l.accept() + readers.append(r) + dic[r] = [] + l.close() + + while readers: + for r in wait(readers): + msg = r.recv(32) + if not msg: + readers.remove(r) + r.close() + else: + dic[r].append(msg) + + expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') + for v in dic.values(): + self.assertEqual(b''.join(v), expected) + + def test_wait_slow(self): + self.test_wait(True) + + def test_wait_socket_slow(self): + self.test_wait_socket(True) + + def test_wait_timeout(self): + from multiprocess.connection import wait + + expected = 5 + a, b = multiprocessing.Pipe() + + start = 
getattr(time,'monotonic',time.time)() + res = wait([a, b], expected) + delta = getattr(time,'monotonic',time.time)() - start + + self.assertEqual(res, []) + self.assertLess(delta, expected * 2) + self.assertGreater(delta, expected * 0.5) + + b.send(None) + + start = getattr(time,'monotonic',time.time)() + res = wait([a, b], 20) + delta = getattr(time,'monotonic',time.time)() - start + + self.assertEqual(res, [a]) + self.assertLess(delta, 0.4) + + @classmethod + def signal_and_sleep(cls, sem, period): + sem.release() + time.sleep(period) + + def test_wait_integer(self): + from multiprocess.connection import wait + + expected = 3 + sorted_ = lambda l: sorted(l, key=lambda x: id(x)) + sem = multiprocessing.Semaphore(0) + a, b = multiprocessing.Pipe() + p = multiprocessing.Process(target=self.signal_and_sleep, + args=(sem, expected)) + + p.start() + self.assertIsInstance(p.sentinel, int) + self.assertTrue(sem.acquire(timeout=20)) + + start = getattr(time,'monotonic',time.time)() + res = wait([a, p.sentinel, b], expected + 20) + delta = getattr(time,'monotonic',time.time)() - start + + self.assertEqual(res, [p.sentinel]) + self.assertLess(delta, expected + 2) + self.assertGreater(delta, expected - 2) + + a.send(None) + + start = getattr(time,'monotonic',time.time)() + res = wait([a, p.sentinel, b], 20) + delta = getattr(time,'monotonic',time.time)() - start + + self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) + self.assertLess(delta, 0.4) + + b.send(None) + + start = getattr(time,'monotonic',time.time)() + res = wait([a, p.sentinel, b], 20) + delta = getattr(time,'monotonic',time.time)() - start + + self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) + self.assertLess(delta, 0.4) + + p.terminate() + p.join() + + def test_neg_timeout(self): + from multiprocess.connection import wait + a, b = multiprocessing.Pipe() + t = getattr(time,'monotonic',time.time)() + res = wait([a], timeout=-1) + t = getattr(time,'monotonic',time.time)() - t + self.assertEqual(res, []) + self.assertLess(t, 1) + a.close() + b.close() + +# +# Issue 14151: Test invalid family on invalid environment +# + +class TestInvalidFamily(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_family(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener(r'\\.\test') + + @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") + def test_invalid_family_win32(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener('/var/test.pipe') + +# +# Issue 12098: check sys.flags of child matches that for parent +# + +class TestFlags(unittest.TestCase): + @classmethod + def run_in_grandchild(cls, conn): + conn.send(tuple(sys.flags)) + + @classmethod + def run_in_child(cls): + import json + r, w = multiprocessing.Pipe(duplex=False) + p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) + p.start() + grandchild_flags = r.recv() + p.join() + r.close() + w.close() + flags = (tuple(sys.flags), grandchild_flags) + print(json.dumps(flags)) + + def _test_flags(self): + import json + # start child process using unusual flags + prog = ('from multiprocess.tests import TestFlags; ' + + 'TestFlags.run_in_child()') + data = subprocess.check_output( + [sys.executable, '-E', '-S', '-O', '-c', prog]) + child_flags, grandchild_flags = json.loads(data.decode('ascii')) + self.assertEqual(child_flags, grandchild_flags) + +# +# Test interaction with socket timeouts - see Issue #6056 +# + +class TestTimeouts(unittest.TestCase): + @classmethod + 
def _test_timeout(cls, child, address): + time.sleep(1) + child.send(123) + child.close() + conn = multiprocessing.connection.Client(address) + conn.send(456) + conn.close() + + def test_timeout(self): + old_timeout = socket.getdefaulttimeout() + try: + socket.setdefaulttimeout(0.1) + parent, child = multiprocessing.Pipe(duplex=True) + l = multiprocessing.connection.Listener(family='AF_INET') + p = multiprocessing.Process(target=self._test_timeout, + args=(child, l.address)) + p.start() + child.close() + self.assertEqual(parent.recv(), 123) + parent.close() + conn = l.accept() + self.assertEqual(conn.recv(), 456) + conn.close() + l.close() + join_process(p) + finally: + socket.setdefaulttimeout(old_timeout) + +# +# Test what happens with no "if __name__ == '__main__'" +# + +class TestNoForkBomb(unittest.TestCase): + def test_noforkbomb(self): + sm = multiprocessing.get_start_method() + name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') + if sm != 'fork': + rc, out, err = test.support.script_helper.assert_python_failure(name, sm) + self.assertEqual(out, b'') + self.assertIn(b'RuntimeError', err) + else: + rc, out, err = test.support.script_helper.assert_python_ok(name, sm) + self.assertEqual(out.rstrip(), b'123') + self.assertEqual(err, b'') + +# +# Issue #17555: ForkAwareThreadLock +# + +class TestForkAwareThreadLock(unittest.TestCase): + # We recursively start processes. Issue #17555 meant that the + # after fork registry would get duplicate entries for the same + # lock. The size of the registry at generation n was ~2**n. + + @classmethod + def child(cls, n, conn): + if n > 1: + p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) + p.start() + conn.close() + join_process(p) + else: + conn.send(len(util._afterfork_registry)) + conn.close() + + def test_lock(self): + r, w = multiprocessing.Pipe(False) + l = util.ForkAwareThreadLock() + old_size = len(util._afterfork_registry) + p = multiprocessing.Process(target=self.child, args=(5, w)) + p.start() + w.close() + new_size = r.recv() + join_process(p) + self.assertLessEqual(new_size, old_size) + +# +# Check that non-forked child processes do not inherit unneeded fds/handles +# + +class TestCloseFds(unittest.TestCase): + + def get_high_socket_fd(self): + if WIN32: + # The child process will not have any socket handles, so + # calling socket.fromfd() should produce WSAENOTSOCK even + # if there is a handle of the same number. + return socket.socket().detach() + else: + # We want to produce a socket with an fd high enough that a + # freshly created child process will not have any fds as high. 
+ fd = socket.socket().detach() + to_close = [] + while fd < 50: + to_close.append(fd) + fd = os.dup(fd) + for x in to_close: + os.close(x) + return fd + + def close(self, fd): + if WIN32: + socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() + else: + os.close(fd) + + @classmethod + def _test_closefds(cls, conn, fd): + try: + s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) + except Exception as e: + conn.send(e) + else: + s.close() + conn.send(None) + + def test_closefd(self): + if not HAS_REDUCTION: + raise unittest.SkipTest('requires fd pickling') + + reader, writer = multiprocessing.Pipe() + fd = self.get_high_socket_fd() + try: + p = multiprocessing.Process(target=self._test_closefds, + args=(writer, fd)) + p.start() + writer.close() + e = reader.recv() + join_process(p) + finally: + self.close(fd) + writer.close() + reader.close() + + if multiprocessing.get_start_method() == 'fork': + self.assertIs(e, None) + else: + WSAENOTSOCK = 10038 + self.assertIsInstance(e, OSError) + self.assertTrue(e.errno == errno.EBADF or + e.winerror == WSAENOTSOCK, e) + +# +# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc +# + +class TestIgnoreEINTR(unittest.TestCase): + + # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block + CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) + + @classmethod + def _test_ignore(cls, conn): + def handler(signum, frame): + pass + signal.signal(signal.SIGUSR1, handler) + conn.send('ready') + x = conn.recv() + conn.send(x) + conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) + + @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') + def test_ignore(self): + conn, child_conn = multiprocessing.Pipe() + try: + p = multiprocessing.Process(target=self._test_ignore, + args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() + self.assertEqual(conn.recv(), 'ready') + time.sleep(0.1) + os.kill(p.pid, signal.SIGUSR1) + time.sleep(0.1) + conn.send(1234) + self.assertEqual(conn.recv(), 1234) + time.sleep(0.1) + os.kill(p.pid, signal.SIGUSR1) + self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) + time.sleep(0.1) + p.join() + finally: + conn.close() + + @classmethod + def _test_ignore_listener(cls, conn): + def handler(signum, frame): + pass + signal.signal(signal.SIGUSR1, handler) + with multiprocessing.connection.Listener() as l: + conn.send(l.address) + a = l.accept() + a.send('welcome') + + @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') + def test_ignore_listener(self): + conn, child_conn = multiprocessing.Pipe() + try: + p = multiprocessing.Process(target=self._test_ignore_listener, + args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() + address = conn.recv() + time.sleep(0.1) + os.kill(p.pid, signal.SIGUSR1) + time.sleep(0.1) + client = multiprocessing.connection.Client(address) + self.assertEqual(client.recv(), 'welcome') + p.join() + finally: + conn.close() + +class TestStartMethod(unittest.TestCase): + @classmethod + def _check_context(cls, conn): + conn.send(multiprocessing.get_start_method()) + + def check_context(self, ctx): + r, w = ctx.Pipe(duplex=False) + p = ctx.Process(target=self._check_context, args=(w,)) + p.start() + w.close() + child_method = r.recv() + r.close() + p.join() + self.assertEqual(child_method, ctx.get_start_method()) + + def test_context(self): + for method in ('fork', 'spawn', 'forkserver'): + try: + ctx = multiprocessing.get_context(method) + except ValueError: + continue + 
self.assertEqual(ctx.get_start_method(), method) + self.assertIs(ctx.get_context(), ctx) + self.assertRaises(ValueError, ctx.set_start_method, 'spawn') + self.assertRaises(ValueError, ctx.set_start_method, None) + self.check_context(ctx) + + def test_set_get(self): + multiprocessing.set_forkserver_preload(PRELOAD) + count = 0 + old_method = multiprocessing.get_start_method() + try: + for method in ('fork', 'spawn', 'forkserver'): + try: + multiprocessing.set_start_method(method, force=True) + except ValueError: + continue + self.assertEqual(multiprocessing.get_start_method(), method) + ctx = multiprocessing.get_context() + self.assertEqual(ctx.get_start_method(), method) + self.assertTrue(type(ctx).__name__.lower().startswith(method)) + self.assertTrue( + ctx.Process.__name__.lower().startswith(method)) + self.check_context(multiprocessing) + count += 1 + finally: + multiprocessing.set_start_method(old_method, force=True) + self.assertGreaterEqual(count, 1) + + def test_get_all(self): + methods = multiprocessing.get_all_start_methods() + if sys.platform == 'win32': + self.assertEqual(methods, ['spawn']) + else: + self.assertTrue(methods == ['fork', 'spawn'] or + methods == ['spawn', 'fork'] or + methods == ['fork', 'spawn', 'forkserver'] or + methods == ['spawn', 'fork', 'forkserver']) + + def test_preload_resources(self): + if multiprocessing.get_start_method() != 'forkserver': + self.skipTest("test only relevant for 'forkserver' method") + name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') + rc, out, err = test.support.script_helper.assert_python_ok(name) + out = out.decode() + err = err.decode() + if out.rstrip() != 'ok' or err != '': + print(out) + print(err) + self.fail("failed spawning forkserver or grandchild") + + +@unittest.skipIf(sys.platform == "win32", + "test semantics don't make sense on Windows") +class TestResourceTracker(unittest.TestCase): + + def _test_resource_tracker(self): + # + # Check that killing process does not leak named semaphores + # + cmd = '''if 1: + import time, os, tempfile + import multiprocess as mp + from multiprocess import resource_tracker + from multiprocess.shared_memory import SharedMemory + + mp.set_start_method("spawn") + rand = tempfile._RandomNameSequence() + + + def create_and_register_resource(rtype): + if rtype == "semaphore": + lock = mp.Lock() + return lock, lock._semlock.name + elif rtype == "shared_memory": + sm = SharedMemory(create=True, size=10) + return sm, sm._name + else: + raise ValueError( + "Resource type {{}} not understood".format(rtype)) + + + resource1, rname1 = create_and_register_resource("{rtype}") + resource2, rname2 = create_and_register_resource("{rtype}") + + os.write({w}, rname1.encode("ascii") + b"\\n") + os.write({w}, rname2.encode("ascii") + b"\\n") + + time.sleep(10) + ''' + for rtype in resource_tracker._CLEANUP_FUNCS: + with self.subTest(rtype=rtype): + if rtype == "noop": + # Artefact resource type used by the resource_tracker + continue + r, w = os.pipe() + p = subprocess.Popen([sys.executable, + '-E', '-c', cmd.format(w=w, rtype=rtype)], + pass_fds=[w], + stderr=subprocess.PIPE) + os.close(w) + with open(r, 'rb', closefd=True) as f: + name1 = f.readline().rstrip().decode('ascii') + name2 = f.readline().rstrip().decode('ascii') + _resource_unlink(name1, rtype) + p.terminate() + p.wait() + + deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT + while getattr(time,'monotonic',time.time)() < deadline: + time.sleep(.5) + try: + _resource_unlink(name2, rtype) + except OSError as e: 
+ # docs say it should be ENOENT, but OSX seems to give + # EINVAL + self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) + break + else: + raise AssertionError( + f"A {rtype} resource was leaked after a process was " + f"abruptly terminated.") + err = p.stderr.read().decode('utf-8') + p.stderr.close() + expected = ('resource_tracker: There appear to be 2 leaked {} ' + 'objects'.format( + rtype)) + self.assertRegex(err, expected) + self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) + + def check_resource_tracker_death(self, signum, should_die): + # bpo-31310: if the semaphore tracker process has died, it should + # be restarted implicitly. + from multiprocess.resource_tracker import _resource_tracker + pid = _resource_tracker._pid + if pid is not None: + os.kill(pid, signal.SIGKILL) + support.wait_process(pid, exitcode=-signal.SIGKILL) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + _resource_tracker.ensure_running() + pid = _resource_tracker._pid + + os.kill(pid, signum) + time.sleep(1.0) # give it time to die + + ctx = multiprocessing.get_context("spawn") + with warnings.catch_warnings(record=True) as all_warn: + warnings.simplefilter("always") + sem = ctx.Semaphore() + sem.acquire() + sem.release() + wr = weakref.ref(sem) + # ensure `sem` gets collected, which triggers communication with + # the semaphore tracker + del sem + gc.collect() + self.assertIsNone(wr()) + if should_die: + self.assertEqual(len(all_warn), 1) + the_warn = all_warn[0] + self.assertTrue(issubclass(the_warn.category, UserWarning)) + self.assertTrue("resource_tracker: process died" + in str(the_warn.message)) + else: + self.assertEqual(len(all_warn), 0) + + def test_resource_tracker_sigint(self): + # Catchable signal (ignored by semaphore tracker) + self.check_resource_tracker_death(signal.SIGINT, False) + + def test_resource_tracker_sigterm(self): + # Catchable signal (ignored by semaphore tracker) + self.check_resource_tracker_death(signal.SIGTERM, False) + + def test_resource_tracker_sigkill(self): + # Uncatchable signal. + self.check_resource_tracker_death(signal.SIGKILL, True) + + @staticmethod + def _is_resource_tracker_reused(conn, pid): + from multiprocess.resource_tracker import _resource_tracker + _resource_tracker.ensure_running() + # The pid should be None in the child process, expect for the fork + # context. It should not be a new value. 
+ reused = _resource_tracker._pid in (None, pid) + reused &= _resource_tracker._check_alive() + conn.send(reused) + + def test_resource_tracker_reused(self): + from multiprocess.resource_tracker import _resource_tracker + _resource_tracker.ensure_running() + pid = _resource_tracker._pid + + r, w = multiprocessing.Pipe(duplex=False) + p = multiprocessing.Process(target=self._is_resource_tracker_reused, + args=(w, pid)) + p.start() + is_resource_tracker_reused = r.recv() + + # Clean up + p.join() + w.close() + r.close() + + self.assertTrue(is_resource_tracker_reused) + + +class TestSimpleQueue(unittest.TestCase): + + @classmethod + def _test_empty(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + # issue 30301, could fail under spawn and forkserver + try: + queue.put(queue.empty()) + queue.put(queue.empty()) + finally: + parent_can_continue.set() + + def test_empty(self): + queue = multiprocessing.SimpleQueue() + child_can_start = multiprocessing.Event() + parent_can_continue = multiprocessing.Event() + + proc = multiprocessing.Process( + target=self._test_empty, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertTrue(queue.empty()) + + child_can_start.set() + parent_can_continue.wait() + + self.assertFalse(queue.empty()) + self.assertEqual(queue.get(), True) + self.assertEqual(queue.get(), False) + self.assertTrue(queue.empty()) + + proc.join() + + def test_close(self): + queue = multiprocessing.SimpleQueue() + queue.close() + # closing a queue twice should not fail + queue.close() + + # Test specific to CPython since it tests private attributes + @test.support.cpython_only + def test_closed(self): + queue = multiprocessing.SimpleQueue() + queue.close() + self.assertTrue(queue._reader.closed) + self.assertTrue(queue._writer.closed) + + +class TestPoolNotLeakOnFailure(unittest.TestCase): + + def test_release_unused_processes(self): + # Issue #19675: During pool creation, if we can't create a process, + # don't leak already created ones. + will_fail_in = 3 + forked_processes = [] + + class FailingForkProcess: + def __init__(self, **kwargs): + self.name = 'Fake Process' + self.exitcode = None + self.state = None + forked_processes.append(self) + + def start(self): + nonlocal will_fail_in + if will_fail_in <= 0: + raise OSError("Manually induced OSError") + will_fail_in -= 1 + self.state = 'started' + + def terminate(self): + self.state = 'stopping' + + def join(self): + if self.state == 'stopping': + self.state = 'stopped' + + def is_alive(self): + return self.state == 'started' or self.state == 'stopping' + + with self.assertRaisesRegex(OSError, 'Manually induced OSError'): + p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( + Process=FailingForkProcess)) + p.close() + p.join() + self.assertFalse( + any(process.is_alive() for process in forked_processes)) + + +@hashlib_helper.requires_hashdigest('md5') +class TestSyncManagerTypes(unittest.TestCase): + """Test all the types which can be shared between a parent and a + child process by using a manager which acts as an intermediary + between them. + + In the following unit-tests the base type is created in the parent + process, the @classmethod represents the worker process and the + shared object is readable and editable between the two. + + # The child. + @classmethod + def _test_list(cls, obj): + assert obj[0] == 5 + assert obj.append(6) + + # The parent. 
+ def test_list(self): + o = self.manager.list() + o.append(5) + self.run_worker(self._test_list, o) + assert o[1] == 6 + """ + manager_class = multiprocessing.managers.SyncManager + + def setUp(self): + self.manager = self.manager_class() + self.manager.start() + self.proc = None + + def tearDown(self): + if self.proc is not None and self.proc.is_alive(): + self.proc.terminate() + self.proc.join() + self.manager.shutdown() + self.manager = None + self.proc = None + + @classmethod + def setUpClass(cls): + support.reap_children() + + tearDownClass = setUpClass + + def wait_proc_exit(self): + # Only the manager process should be returned by active_children() + # but this can take a bit on slow machines, so wait a few seconds + # if there are other children too (see #17395). + join_process(self.proc) + start_time = getattr(time,'monotonic',time.time)() + t = 0.01 + while len(multiprocessing.active_children()) > 1: + time.sleep(t) + t *= 2 + dt = getattr(time,'monotonic',time.time)() - start_time + if dt >= 5.0: + test.support.environment_altered = True + support.print_warning(f"multiprocess.Manager still has " + f"{multiprocessing.active_children()} " + f"active children after {dt} seconds") + break + + def run_worker(self, worker, obj): + self.proc = multiprocessing.Process(target=worker, args=(obj, )) + self.proc.daemon = True + self.proc.start() + self.wait_proc_exit() + self.assertEqual(self.proc.exitcode, 0) + + @classmethod + def _test_event(cls, obj): + assert obj.is_set() + obj.wait() + obj.clear() + obj.wait(0.001) + + def test_event(self): + o = self.manager.Event() + o.set() + self.run_worker(self._test_event, o) + assert not o.is_set() + o.wait(0.001) + + @classmethod + def _test_lock(cls, obj): + obj.acquire() + + def test_lock(self, lname="Lock"): + o = getattr(self.manager, lname)() + self.run_worker(self._test_lock, o) + o.release() + self.assertRaises(RuntimeError, o.release) # already released + + @classmethod + def _test_rlock(cls, obj): + obj.acquire() + obj.release() + + def test_rlock(self, lname="Lock"): + o = getattr(self.manager, lname)() + self.run_worker(self._test_rlock, o) + + @classmethod + def _test_semaphore(cls, obj): + obj.acquire() + + def test_semaphore(self, sname="Semaphore"): + o = getattr(self.manager, sname)() + self.run_worker(self._test_semaphore, o) + o.release() + + def test_bounded_semaphore(self): + self.test_semaphore(sname="BoundedSemaphore") + + @classmethod + def _test_condition(cls, obj): + obj.acquire() + obj.release() + + def test_condition(self): + o = self.manager.Condition() + self.run_worker(self._test_condition, o) + + @classmethod + def _test_barrier(cls, obj): + assert obj.parties == 5 + obj.reset() + + def test_barrier(self): + o = self.manager.Barrier(5) + self.run_worker(self._test_barrier, o) + + @classmethod + def _test_pool(cls, obj): + # TODO: fix https://bugs.python.org/issue35919 + with obj: + pass + + def test_pool(self): + o = self.manager.Pool(processes=4) + self.run_worker(self._test_pool, o) + + @classmethod + def _test_queue(cls, obj): + assert obj.qsize() == 2 + assert obj.full() + assert not obj.empty() + assert obj.get() == 5 + assert not obj.empty() + assert obj.get() == 6 + assert obj.empty() + + def test_queue(self, qname="Queue"): + o = getattr(self.manager, qname)(2) + o.put(5) + o.put(6) + self.run_worker(self._test_queue, o) + assert o.empty() + assert not o.full() + + def test_joinable_queue(self): + self.test_queue("JoinableQueue") + + @classmethod + def _test_list(cls, obj): + assert obj[0] == 5 + assert 
obj.count(5) == 1 + assert obj.index(5) == 0 + obj.sort() + obj.reverse() + for x in obj: + pass + assert len(obj) == 1 + assert obj.pop(0) == 5 + + def test_list(self): + o = self.manager.list() + o.append(5) + self.run_worker(self._test_list, o) + assert not o + self.assertEqual(len(o), 0) + + @classmethod + def _test_dict(cls, obj): + assert len(obj) == 1 + assert obj['foo'] == 5 + assert obj.get('foo') == 5 + assert list(obj.items()) == [('foo', 5)] + assert list(obj.keys()) == ['foo'] + assert list(obj.values()) == [5] + assert obj.copy() == {'foo': 5} + assert obj.popitem() == ('foo', 5) + + def test_dict(self): + o = self.manager.dict() + o['foo'] = 5 + self.run_worker(self._test_dict, o) + assert not o + self.assertEqual(len(o), 0) + + @classmethod + def _test_value(cls, obj): + assert obj.value == 1 + assert obj.get() == 1 + obj.set(2) + + def test_value(self): + o = self.manager.Value('i', 1) + self.run_worker(self._test_value, o) + self.assertEqual(o.value, 2) + self.assertEqual(o.get(), 2) + + @classmethod + def _test_array(cls, obj): + assert obj[0] == 0 + assert obj[1] == 1 + assert len(obj) == 2 + assert list(obj) == [0, 1] + + def test_array(self): + o = self.manager.Array('i', [0, 1]) + self.run_worker(self._test_array, o) + + @classmethod + def _test_namespace(cls, obj): + assert obj.x == 0 + assert obj.y == 1 + + def test_namespace(self): + o = self.manager.Namespace() + o.x = 0 + o.y = 1 + self.run_worker(self._test_namespace, o) + + +class MiscTestCase(unittest.TestCase): + def test__all__(self): + # Just make sure names in blacklist are excluded + support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, + blacklist=['SUBDEBUG', 'SUBWARNING']) +# +# Mixins +# + +class BaseMixin(object): + @classmethod + def setUpClass(cls): + cls.dangling = (multiprocessing.process._dangling.copy(), + threading._dangling.copy()) + + @classmethod + def tearDownClass(cls): + # bpo-26762: Some multiprocessing objects like Pool create reference + # cycles. Trigger a garbage collection to break these cycles. 
+ test.support.gc_collect() + + processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) + if processes: + test.support.environment_altered = True + support.print_warning(f'Dangling processes: {processes}') + processes = None + + threads = set(threading._dangling) - set(cls.dangling[1]) + if threads: + test.support.environment_altered = True + support.print_warning(f'Dangling threads: {threads}') + threads = None + + +class ProcessesMixin(BaseMixin): + TYPE = 'processes' + Process = multiprocessing.Process + connection = multiprocessing.connection + current_process = staticmethod(multiprocessing.current_process) + parent_process = staticmethod(multiprocessing.parent_process) + active_children = staticmethod(multiprocessing.active_children) + Pool = staticmethod(multiprocessing.Pool) + Pipe = staticmethod(multiprocessing.Pipe) + Queue = staticmethod(multiprocessing.Queue) + JoinableQueue = staticmethod(multiprocessing.JoinableQueue) + Lock = staticmethod(multiprocessing.Lock) + RLock = staticmethod(multiprocessing.RLock) + Semaphore = staticmethod(multiprocessing.Semaphore) + BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) + Condition = staticmethod(multiprocessing.Condition) + Event = staticmethod(multiprocessing.Event) + Barrier = staticmethod(multiprocessing.Barrier) + Value = staticmethod(multiprocessing.Value) + Array = staticmethod(multiprocessing.Array) + RawValue = staticmethod(multiprocessing.RawValue) + RawArray = staticmethod(multiprocessing.RawArray) + + +class ManagerMixin(BaseMixin): + TYPE = 'manager' + Process = multiprocessing.Process + Queue = property(operator.attrgetter('manager.Queue')) + JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) + Lock = property(operator.attrgetter('manager.Lock')) + RLock = property(operator.attrgetter('manager.RLock')) + Semaphore = property(operator.attrgetter('manager.Semaphore')) + BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) + Condition = property(operator.attrgetter('manager.Condition')) + Event = property(operator.attrgetter('manager.Event')) + Barrier = property(operator.attrgetter('manager.Barrier')) + Value = property(operator.attrgetter('manager.Value')) + Array = property(operator.attrgetter('manager.Array')) + list = property(operator.attrgetter('manager.list')) + dict = property(operator.attrgetter('manager.dict')) + Namespace = property(operator.attrgetter('manager.Namespace')) + + @classmethod + def Pool(cls, *args, **kwds): + return cls.manager.Pool(*args, **kwds) + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.manager = multiprocessing.Manager() + + @classmethod + def tearDownClass(cls): + # only the manager process should be returned by active_children() + # but this can take a bit on slow machines, so wait a few seconds + # if there are other children too (see #17395) + start_time = getattr(time,'monotonic',time.time)() + t = 0.01 + while len(multiprocessing.active_children()) > 1: + time.sleep(t) + t *= 2 + dt = getattr(time,'monotonic',time.time)() - start_time + if dt >= 5.0: + test.support.environment_altered = True + support.print_warning(f"multiprocess.Manager still has " + f"{multiprocessing.active_children()} " + f"active children after {dt} seconds") + break + + gc.collect() # do garbage collection + if cls.manager._number_of_objects() != 0: + # This is not really an error since some tests do not + # ensure that all processes which hold a reference to a + # managed object have been joined. 
+ test.support.environment_altered = True + support.print_warning('Shared objects which still exist ' + 'at manager shutdown:') + support.print_warning(cls.manager._debug_info()) + cls.manager.shutdown() + cls.manager.join() + cls.manager = None + + super().tearDownClass() + + +class ThreadsMixin(BaseMixin): + TYPE = 'threads' + Process = multiprocessing.dummy.Process + connection = multiprocessing.dummy.connection + current_process = staticmethod(multiprocessing.dummy.current_process) + active_children = staticmethod(multiprocessing.dummy.active_children) + Pool = staticmethod(multiprocessing.dummy.Pool) + Pipe = staticmethod(multiprocessing.dummy.Pipe) + Queue = staticmethod(multiprocessing.dummy.Queue) + JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) + Lock = staticmethod(multiprocessing.dummy.Lock) + RLock = staticmethod(multiprocessing.dummy.RLock) + Semaphore = staticmethod(multiprocessing.dummy.Semaphore) + BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) + Condition = staticmethod(multiprocessing.dummy.Condition) + Event = staticmethod(multiprocessing.dummy.Event) + Barrier = staticmethod(multiprocessing.dummy.Barrier) + Value = staticmethod(multiprocessing.dummy.Value) + Array = staticmethod(multiprocessing.dummy.Array) + +# +# Functions used to create test cases from the base ones in this module +# + +def install_tests_in_module_dict(remote_globs, start_method): + __module__ = remote_globs['__name__'] + local_globs = globals() + ALL_TYPES = {'processes', 'threads', 'manager'} + + for name, base in local_globs.items(): + if not isinstance(base, type): + continue + if issubclass(base, BaseTestCase): + if base is BaseTestCase: + continue + assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES + for type_ in base.ALLOWED_TYPES: + newname = 'With' + type_.capitalize() + name[1:] + Mixin = local_globs[type_.capitalize() + 'Mixin'] + class Temp(base, Mixin, unittest.TestCase): + pass + if type_ == 'manager': + Temp = hashlib_helper.requires_hashdigest('md5')(Temp) + Temp.__name__ = Temp.__qualname__ = newname + Temp.__module__ = __module__ + remote_globs[newname] = Temp + elif issubclass(base, unittest.TestCase): + class Temp(base, object): + pass + Temp.__name__ = Temp.__qualname__ = name + Temp.__module__ = __module__ + remote_globs[name] = Temp + + dangling = [None, None] + old_start_method = [None] + + def setUpModule(): + multiprocessing.set_forkserver_preload(PRELOAD) + multiprocessing.process._cleanup() + dangling[0] = multiprocessing.process._dangling.copy() + dangling[1] = threading._dangling.copy() + old_start_method[0] = multiprocessing.get_start_method(allow_none=True) + try: + multiprocessing.set_start_method(start_method, force=True) + except ValueError: + raise unittest.SkipTest(start_method + + ' start method not supported') + + if sys.platform.startswith("linux"): + try: + lock = multiprocessing.RLock() + except OSError: + raise unittest.SkipTest("OSError raises on RLock creation, " + "see issue 3111!") + check_enough_semaphores() + util.get_temp_dir() # creates temp directory + multiprocessing.get_logger().setLevel(LOG_LEVEL) + + def tearDownModule(): + need_sleep = False + + # bpo-26762: Some multiprocessing objects like Pool create reference + # cycles. Trigger a garbage collection to break these cycles. 
+ test.support.gc_collect() + + multiprocessing.set_start_method(old_start_method[0], force=True) + # pause a bit so we don't get warning about dangling threads/processes + processes = set(multiprocessing.process._dangling) - set(dangling[0]) + if processes: + need_sleep = True + test.support.environment_altered = True + support.print_warning(f'Dangling processes: {processes}') + processes = None + + threads = set(threading._dangling) - set(dangling[1]) + if threads: + need_sleep = True + test.support.environment_altered = True + support.print_warning(f'Dangling threads: {threads}') + threads = None + + # Sleep 500 ms to give time to child processes to complete. + if need_sleep: + time.sleep(0.5) + + multiprocessing.util._cleanup_tests() + + remote_globs['setUpModule'] = setUpModule + remote_globs['tearDownModule'] = tearDownModule diff --git a/lib/python3.10/site-packages/multiprocess/tests/__main__.py b/lib/python3.10/site-packages/multiprocess/tests/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..38739b6645c0424061d5aa4be5d075a9abe547ff --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/tests/__main__.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2018-2021 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE + +from __future__ import print_function +import glob +import os +try: + import pox + python = pox.which_python(version=True, fullpath=False) or 'python' +except ImportError: + python = 'python' +import subprocess as sp +from sys import platform +shell = platform[:3] == 'win' + +suite = os.path.dirname(__file__) or os.path.curdir +tests = glob.glob(suite + os.path.sep + 'test_*.py') +tests = glob.glob(suite + os.path.sep + '__init__.py') + \ + [i for i in tests if 'main' not in i] + + +if __name__ == '__main__': + + for test in tests: + p = sp.Popen([python, test], shell=shell).wait() + if not p: + print('.', end='') + print('') + diff --git a/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py b/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py new file mode 100644 index 0000000000000000000000000000000000000000..017e010ba0e6fd4372356e7c2bef5b0f23717c1a --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py @@ -0,0 +1,18 @@ +import multiprocessing, sys + +def foo(): + print("123") + +# Because "if __name__ == '__main__'" is missing this will not work +# correctly on Windows. However, we should get a RuntimeError rather +# than the Windows equivalent of a fork bomb. 
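For contrast, here is a minimal sketch (illustrative only, not part of the test script above) of the guarded pattern that the spawn and forkserver start methods expect; the child re-imports __main__, so process creation has to be protected from running at import time:

import multiprocessing

def foo():
    print("123")

if __name__ == "__main__":
    # The guard keeps the child, which re-imports __main__ under
    # spawn/forkserver, from recursively starting new processes.
    multiprocessing.set_start_method("spawn")
    p = multiprocessing.Process(target=foo)
    p.start()
    p.join()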
+ +if len(sys.argv) > 1: + multiprocessing.set_start_method(sys.argv[1]) +else: + multiprocessing.set_start_method('spawn') + +p = multiprocessing.Process(target=foo) +p.start() +p.join() +sys.exit(p.exitcode) diff --git a/lib/python3.10/site-packages/multiprocess/tests/mp_preload.py b/lib/python3.10/site-packages/multiprocess/tests/mp_preload.py new file mode 100644 index 0000000000000000000000000000000000000000..5314e8f0b216323a9d138015cbd59daf5132ea7d --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/tests/mp_preload.py @@ -0,0 +1,18 @@ +import multiprocessing + +multiprocessing.Lock() + + +def f(): + print("ok") + + +if __name__ == "__main__": + ctx = multiprocessing.get_context("forkserver") + modname = "test.mp_preload" + # Make sure it's importable + __import__(modname) + ctx.set_forkserver_preload([modname]) + proc = ctx.Process(target=f) + proc.start() + proc.join() diff --git a/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py b/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py new file mode 100644 index 0000000000000000000000000000000000000000..a37f93533a7649aa24795ea3ee2dcf7319c1c924 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py @@ -0,0 +1,19 @@ +import unittest +import __init__ as _test_multiprocessing + +import sys +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +if sys.platform == "win32": + raise unittest.SkipTest("fork is not available on Windows") + +if sys.platform == 'darwin': + raise unittest.SkipTest("test may crash on macOS (bpo-33725)") + +_test_multiprocessing.install_tests_in_module_dict(globals(), 'fork') + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py b/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py new file mode 100644 index 0000000000000000000000000000000000000000..af8318287df2d9985b94be170086aee59ffae66f --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py @@ -0,0 +1,16 @@ +import unittest +import __init__ as _test_multiprocessing + +import sys +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +if sys.platform == "win32": + raise unittest.SkipTest("forkserver is not available on Windows") + +_test_multiprocessing.install_tests_in_module_dict(globals(), 'forkserver') + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py b/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py new file mode 100644 index 0000000000000000000000000000000000000000..0c1a57179efb5f2d341664c5e3cdb093932f1995 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py @@ -0,0 +1,299 @@ +# tests __main__ module handling in multiprocessing +from test import support +# Skip tests if _multiprocessing wasn't built. 
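The platform skips in the fork and forkserver test modules above reflect that those start methods exist only on POSIX. A minimal sketch (illustrative only) of how code can query the supported start methods and pick a context instead of hard-coding one:

import multiprocessing

def square(x):
    return x * x

if __name__ == "__main__":
    available = multiprocessing.get_all_start_methods()  # e.g. ['fork', 'spawn', 'forkserver'] on Linux
    method = "forkserver" if "forkserver" in available else "spawn"
    # get_context() scopes the choice to this context rather than the
    # process-global default set by set_start_method().
    ctx = multiprocessing.get_context(method)
    with ctx.Pool(2) as pool:
        print(method, "->", pool.map(square, [1, 2, 3]))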
+support.import_module('_multiprocessing') + +import importlib +import importlib.machinery +import unittest +import sys +import os +import os.path +import py_compile + +from test.support.script_helper import ( + make_pkg, make_script, make_zip_pkg, make_zip_script, + assert_python_ok) + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +# Look up which start methods are available to test +import multiprocess as multiprocessing +AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) + +# Issue #22332: Skip tests if sem_open implementation is broken. +support.import_module('multiprocess.synchronize') + +verbose = support.verbose + +test_source = """\ +# multiprocessing includes all sorts of shenanigans to make __main__ +# attributes accessible in the subprocess in a pickle compatible way. + +# We run the "doesn't work in the interactive interpreter" example from +# the docs to make sure it *does* work from an executed __main__, +# regardless of the invocation mechanism + +import sys +import time +from multiprocess import Pool, set_start_method + +# We use this __main__ defined function in the map call below in order to +# check that multiprocessing in correctly running the unguarded +# code in child processes and then making it available as __main__ +def f(x): + return x*x + +# Check explicit relative imports +if "check_sibling" in __file__: + # We're inside a package and not in a __main__.py file + # so make sure explicit relative imports work correctly + from . import sibling + +if __name__ == '__main__': + start_method = sys.argv[1] + set_start_method(start_method) + results = [] + with Pool(5) as pool: + pool.map_async(f, [1, 2, 3], callback=results.extend) + start_time = getattr(time,'monotonic',time.time)() + while not results: + time.sleep(0.05) + # up to 1 min to report the results + dt = getattr(time,'monotonic',time.time)() - start_time + if dt > 60.0: + raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) + + results.sort() + print(start_method, "->", results) + + pool.join() +""" + +test_source_main_skipped_in_children = """\ +# __main__.py files have an implied "if __name__ == '__main__'" so +# multiprocessing should always skip running them in child processes + +# This means we can't use __main__ defined functions in child processes, +# so we just use "int" as a passthrough operation below + +if __name__ != "__main__": + raise RuntimeError("Should only be called as __main__!") + +import sys +import time +from multiprocess import Pool, set_start_method + +start_method = sys.argv[1] +set_start_method(start_method) +results = [] +with Pool(5) as pool: + pool.map_async(int, [1, 4, 9], callback=results.extend) + start_time = getattr(time,'monotonic',time.time)() + while not results: + time.sleep(0.05) + # up to 1 min to report the results + dt = getattr(time,'monotonic',time.time)() - start_time + if dt > 60.0: + raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) + +results.sort() +print(start_method, "->", results) + +pool.join() +""" + +# These helpers were copied from test_cmd_line_script & tweaked a bit... 
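The map_async-plus-callback polling idiom used by the test_source templates above, isolated as a standalone sketch (this uses the stdlib multiprocessing module; the templates themselves import the multiprocess fork):

import time
from multiprocessing import Pool

def f(x):
    return x * x

if __name__ == "__main__":
    results = []
    with Pool(5) as pool:
        # map_async returns immediately; the callback extends `results`
        # once every output is ready.
        pool.map_async(f, [1, 2, 3], callback=results.extend)
        deadline = time.monotonic() + 60
        while not results:
            if time.monotonic() > deadline:
                raise RuntimeError("Timed out waiting for results")
            time.sleep(0.05)
    print(sorted(results))  # [1, 4, 9]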
+ +def _make_test_script(script_dir, script_basename, + source=test_source, omit_suffix=False): + to_return = make_script(script_dir, script_basename, + source, omit_suffix) + # Hack to check explicit relative imports + if script_basename == "check_sibling": + make_script(script_dir, "sibling", "") + importlib.invalidate_caches() + return to_return + +def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, + source=test_source, depth=1): + to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, + source, depth) + importlib.invalidate_caches() + return to_return + +# There's no easy way to pass the script directory in to get +# -m to work (avoiding that is the whole point of making +# directories and zipfiles executable!) +# So we fake it for testing purposes with a custom launch script +launch_source = """\ +import sys, os.path, runpy +sys.path.insert(0, %s) +runpy._run_module_as_main(%r) +""" + +def _make_launch_script(script_dir, script_basename, module_name, path=None): + if path is None: + path = "os.path.dirname(__file__)" + else: + path = repr(path) + source = launch_source % (path, module_name) + to_return = make_script(script_dir, script_basename, source) + importlib.invalidate_caches() + return to_return + +class MultiProcessingCmdLineMixin(): + maxDiff = None # Show full tracebacks on subprocess failure + + def setUp(self): + if self.start_method not in AVAILABLE_START_METHODS: + self.skipTest("%r start method not available" % self.start_method) + + def _check_output(self, script_name, exit_code, out, err): + if verbose > 1: + print("Output from test script %r:" % script_name) + print(repr(out)) + self.assertEqual(exit_code, 0) + self.assertEqual(err.decode('utf-8'), '') + expected_results = "%s -> [1, 4, 9]" % self.start_method + self.assertEqual(out.decode('utf-8').strip(), expected_results) + + def _check_script(self, script_name, *cmd_line_switches): + if not __debug__: + cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) + run_args = cmd_line_switches + (script_name, self.start_method) + rc, out, err = assert_python_ok(*run_args, __isolated=False) + self._check_output(script_name, rc, out, err) + + def test_basic_script(self): + with support.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script') + self._check_script(script_name) + + def test_basic_script_no_suffix(self): + with support.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script', + omit_suffix=True) + self._check_script(script_name) + + def test_ipython_workaround(self): + # Some versions of the IPython launch script are missing the + # __name__ = "__main__" guard, and multiprocessing has long had + # a workaround for that case + # See https://github.com/ipython/ipython/issues/4698 + source = test_source_main_skipped_in_children + with support.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'ipython', + source=source) + self._check_script(script_name) + script_no_suffix = _make_test_script(script_dir, 'ipython', + source=source, + omit_suffix=True) + self._check_script(script_no_suffix) + + def test_script_compiled(self): + with support.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script') + py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = support.make_legacy_pyc(script_name) + self._check_script(pyc_file) + + def test_directory(self): + source = self.main_in_children_source + with support.temp_dir() as script_dir: + script_name = 
_make_test_script(script_dir, '__main__', + source=source) + self._check_script(script_dir) + + def test_directory_compiled(self): + source = self.main_in_children_source + with support.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = support.make_legacy_pyc(script_name) + self._check_script(script_dir) + + def test_zipfile(self): + source = self.main_in_children_source + with support.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) + self._check_script(zip_name) + + def test_zipfile_compiled(self): + source = self.main_in_children_source + with support.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + compiled_name = py_compile.compile(script_name, doraise=True) + zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) + self._check_script(zip_name) + + def test_module_in_package(self): + with support.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, 'check_sibling') + launch_name = _make_launch_script(script_dir, 'launch', + 'test_pkg.check_sibling') + self._check_script(launch_name) + + def test_module_in_package_in_zipfile(self): + with support.temp_dir() as script_dir: + zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) + self._check_script(launch_name) + + def test_module_in_subpackage_in_zipfile(self): + with support.temp_dir() as script_dir: + zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) + self._check_script(launch_name) + + def test_package(self): + source = self.main_in_children_source + with support.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, '__main__', + source=source) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') + self._check_script(launch_name) + + def test_package_compiled(self): + source = self.main_in_children_source + with support.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, '__main__', + source=source) + compiled_name = py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = support.make_legacy_pyc(script_name) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') + self._check_script(launch_name) + +# Test all supported start methods (setupClass skips as appropriate) + +class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'spawn' + main_in_children_source = test_source_main_skipped_in_children + +class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'fork' + main_in_children_source = test_source + +class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'forkserver' + main_in_children_source = test_source_main_skipped_in_children + +def tearDownModule(): + support.reap_children() + +if __name__ == '__main__': + unittest.main() 
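The directory and zipfile cases above all come down to executing something that supplies a __main__ module. A small, hypothetical sketch of that mechanism using only the standard library (zipfile plus runpy), independent of the test harness:

import os
import runpy
import tempfile
import zipfile

SOURCE = "print('running as', __name__)\n"

if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp:
        zip_path = os.path.join(tmp, "bundle.zip")
        # A zip archive with a top-level __main__.py is directly executable,
        # just as `python bundle.zip` would run it.
        with zipfile.ZipFile(zip_path, "w") as zf:
            zf.writestr("__main__.py", SOURCE)
        runpy.run_path(zip_path, run_name="__main__")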
diff --git a/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_spawn.py b/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_spawn.py new file mode 100644 index 0000000000000000000000000000000000000000..d63bc18082376123848406ca08aaa37283a734b7 --- /dev/null +++ b/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_spawn.py @@ -0,0 +1,12 @@ +import unittest +import __init__ as _test_multiprocessing + +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +_test_multiprocessing.install_tests_in_module_dict(globals(), 'spawn') + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/nltk/tokenize/casual.py b/lib/python3.10/site-packages/nltk/tokenize/casual.py new file mode 100644 index 0000000000000000000000000000000000000000..d0545abe50530c20903f8aeaa29fbfc55094e70e --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tokenize/casual.py @@ -0,0 +1,458 @@ +# +# Natural Language Toolkit: Twitter Tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Potts +# Ewan Klein (modifications) +# Pierpaolo Pantone <> (modifications) +# Tom Aarsen <> (modifications) +# URL: +# For license information, see LICENSE.TXT +# + + +""" +Twitter-aware tokenizer, designed to be flexible and easy to adapt to new +domains and tasks. The basic logic is this: + +1. The tuple REGEXPS defines a list of regular expression + strings. + +2. The REGEXPS strings are put, in order, into a compiled + regular expression object called WORD_RE, under the TweetTokenizer + class. + +3. The tokenization is done by WORD_RE.findall(s), where s is the + user-supplied string, inside the tokenize() method of the class + TweetTokenizer. + +4. When instantiating Tokenizer objects, there are several options: + * preserve_case. By default, it is set to True. If it is set to + False, then the tokenizer will downcase everything except for + emoticons. + * reduce_len. By default, it is set to False. It specifies whether + to replace repeated character sequences of length 3 or greater + with sequences of length 3. + * strip_handles. By default, it is set to False. It specifies + whether to remove Twitter handles of text used in the + `tokenize` method. + * match_phone_numbers. By default, it is set to True. It indicates + whether the `tokenize` method should look for phone numbers. +""" + + +###################################################################### + +import html +from typing import List + +import regex # https://github.com/nltk/nltk/issues/2409 + +from nltk.tokenize.api import TokenizerI + +###################################################################### +# The following strings are components in the regular expression +# that is used for tokenizing. It's important that phone_number +# appears first in the final regex (since it can contain whitespace). +# It also could matter that tags comes after emoticons, due to the +# possibility of having text like +# +# <:| and some text >:) +# +# Most importantly, the final element should always be last, since it +# does a last ditch whitespace-based tokenization of whatever is left. + +# ToDo: Update with https://en.wikipedia.org/wiki/List_of_emoticons ? + +# This particular element is used in a couple ways, so we define it +# with a name: +EMOTICONS = r""" + (?: + [<>]? + [:;=8] # eyes + [\-o\*\']? # optional nose + [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth + | + [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth + [\-o\*\']? 
# optional nose + [:;=8] # eyes + [<>]? + | + {}\[\]]+ # Run of non-space, non-()<>{}[] + | # or + \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...) + | + \([^\s]+?\) # balanced parens, non-recursive: (...) + )+ + (?: # End with: + \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...) + | + \([^\s]+?\) # balanced parens, non-recursive: (...) + | # or + [^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars + ) + | # OR, the following to match naked domains: + (?: + (?\s]+>""", + # ASCII Arrows + r"""[\-]+>|<[\-]+""", + # Twitter username: + r"""(?:@[\w_]+)""", + # Twitter hashtags: + r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""", + # email addresses + r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""", + # Zero-Width-Joiner and Skin tone modifier emojis + """.(?: + [\U0001F3FB-\U0001F3FF]?(?:\u200d.[\U0001F3FB-\U0001F3FF]?)+ + | + [\U0001F3FB-\U0001F3FF] + )""", + # flags + FLAGS, + # Remaining word types: + r""" + (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes. + | + (?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals. + | + (?:[\w_]+) # Words without apostrophes or dashes. + | + (?:\.(?:\s*\.){1,}) # Ellipsis dots. + | + (?:\S) # Everything else that isn't whitespace. + """, +) + +# Take the main components and add a phone regex as the second parameter +REGEXPS_PHONE = (REGEXPS[0], PHONE_REGEX, *REGEXPS[1:]) + +###################################################################### +# TweetTokenizer.WORD_RE and TweetTokenizer.PHONE_WORD_RE represent +# the core tokenizing regexes. They are compiled lazily. + +# WORD_RE performs poorly on these patterns: +HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}") + +# The emoticon string gets its own regex so that we can preserve case for +# them as needed: +EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE) + +# These are for regularizing HTML entities to Unicode: +ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);") + +# For stripping away handles from a tweet: +HANDLES_RE = regex.compile( + r"(?>> from nltk.tokenize.casual import _replace_html_entities + >>> _replace_html_entities(b'Price: £100') + 'Price: \\xa3100' + >>> print(_replace_html_entities(b'Price: £100')) + Price: £100 + >>> + """ + + def _convert_entity(match): + entity_body = match.group(3) + if match.group(1): + try: + if match.group(2): + number = int(entity_body, 16) + else: + number = int(entity_body, 10) + # Numeric character references in the 80-9F range are typically + # interpreted by browsers as representing the characters mapped + # to bytes 80-9F in the Windows-1252 encoding. For more info + # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets + if 0x80 <= number <= 0x9F: + return bytes((number,)).decode("cp1252") + except ValueError: + number = None + else: + if entity_body in keep: + return match.group(0) + number = html.entities.name2codepoint.get(entity_body) + if number is not None: + try: + return chr(number) + except (ValueError, OverflowError): + pass + + return "" if remove_illegal else match.group(0) + + return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding)) + + +###################################################################### + + +class TweetTokenizer(TokenizerI): + r""" + Tokenizer for tweets. 
+ + >>> from nltk.tokenize import TweetTokenizer + >>> tknzr = TweetTokenizer() + >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--" + >>> tknzr.tokenize(s0) # doctest: +NORMALIZE_WHITESPACE + ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', + '<--'] + + Examples using `strip_handles` and `reduce_len parameters`: + + >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True) + >>> s1 = '@remy: This is waaaaayyyy too much for you!!!!!!' + >>> tknzr.tokenize(s1) + [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!'] + """ + + # Values used to lazily compile WORD_RE and PHONE_WORD_RE, + # which are the core tokenizing regexes. + _WORD_RE = None + _PHONE_WORD_RE = None + + ###################################################################### + + def __init__( + self, + preserve_case=True, + reduce_len=False, + strip_handles=False, + match_phone_numbers=True, + ): + """ + Create a `TweetTokenizer` instance with settings for use in the `tokenize` method. + + :param preserve_case: Flag indicating whether to preserve the casing (capitalisation) + of text used in the `tokenize` method. Defaults to True. + :type preserve_case: bool + :param reduce_len: Flag indicating whether to replace repeated character sequences + of length 3 or greater with sequences of length 3. Defaults to False. + :type reduce_len: bool + :param strip_handles: Flag indicating whether to remove Twitter handles of text used + in the `tokenize` method. Defaults to False. + :type strip_handles: bool + :param match_phone_numbers: Flag indicating whether the `tokenize` method should look + for phone numbers. Defaults to True. + :type match_phone_numbers: bool + """ + self.preserve_case = preserve_case + self.reduce_len = reduce_len + self.strip_handles = strip_handles + self.match_phone_numbers = match_phone_numbers + + def tokenize(self, text: str) -> List[str]: + """Tokenize the input text. + + :param text: str + :rtype: list(str) + :return: a tokenized list of strings; joining this list returns\ + the original string if `preserve_case=False`. + """ + # Fix HTML character entities: + text = _replace_html_entities(text) + # Remove username handles + if self.strip_handles: + text = remove_handles(text) + # Normalize word lengthening + if self.reduce_len: + text = reduce_lengthening(text) + # Shorten problematic sequences of characters + safe_text = HANG_RE.sub(r"\1\1\1", text) + # Recognise phone numbers during tokenization + if self.match_phone_numbers: + words = self.PHONE_WORD_RE.findall(safe_text) + else: + words = self.WORD_RE.findall(safe_text) + # Possibly alter the case, but avoid changing emoticons like :D into :d: + if not self.preserve_case: + words = list( + map((lambda x: x if EMOTICON_RE.search(x) else x.lower()), words) + ) + return words + + @property + def WORD_RE(self) -> "regex.Pattern": + """Core TweetTokenizer regex""" + # Compiles the regex for this and all future instantiations of TweetTokenizer. + if not type(self)._WORD_RE: + type(self)._WORD_RE = regex.compile( + f"({'|'.join(REGEXPS)})", + regex.VERBOSE | regex.I | regex.UNICODE, + ) + return type(self)._WORD_RE + + @property + def PHONE_WORD_RE(self) -> "regex.Pattern": + """Secondary core TweetTokenizer regex""" + # Compiles the regex for this and all future instantiations of TweetTokenizer. 
+ if not type(self)._PHONE_WORD_RE: + type(self)._PHONE_WORD_RE = regex.compile( + f"({'|'.join(REGEXPS_PHONE)})", + regex.VERBOSE | regex.I | regex.UNICODE, + ) + return type(self)._PHONE_WORD_RE + + +###################################################################### +# Normalization Functions +###################################################################### + + +def reduce_lengthening(text): + """ + Replace repeated character sequences of length 3 or greater with sequences + of length 3. + """ + pattern = regex.compile(r"(.)\1{2,}") + return pattern.sub(r"\1\1\1", text) + + +def remove_handles(text): + """ + Remove Twitter username handles from text. + """ + # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly + return HANDLES_RE.sub(" ", text) + + +###################################################################### +# Tokenization Function +###################################################################### + + +def casual_tokenize( + text, + preserve_case=True, + reduce_len=False, + strip_handles=False, + match_phone_numbers=True, +): + """ + Convenience function for wrapping the tokenizer. + """ + return TweetTokenizer( + preserve_case=preserve_case, + reduce_len=reduce_len, + strip_handles=strip_handles, + match_phone_numbers=match_phone_numbers, + ).tokenize(text) + + +############################################################################### diff --git a/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py b/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py new file mode 100644 index 0000000000000000000000000000000000000000..547827cefe1af65209e1f44237b7ac160b167920 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py @@ -0,0 +1,147 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Hench +# Alex Estes +# URL: +# For license information, see LICENSE.TXT + +""" +The Legality Principle is a language agnostic principle maintaining that syllable +onsets and codas (the beginning and ends of syllables not including the vowel) +are only legal if they are found as word onsets or codas in the language. The English +word ''admit'' must then be syllabified as ''ad-mit'' since ''dm'' is not found +word-initially in the English language (Bartlett et al.). This principle was first proposed +in Daniel Kahn's 1976 dissertation, ''Syllable-based generalizations in English phonology''. + +Kahn further argues that there is a ''strong tendency to syllabify in such a way that +initial clusters are of maximal length, consistent with the general constraints on +word-initial consonant clusters.'' Consequently, in addition to being legal onsets, +the longest legal onset is preferable---''Onset Maximization''. + +The default implementation assumes an English vowel set, but the `vowels` attribute +can be set to IPA or any other alphabet's vowel set for the use-case. +Both a valid set of vowels as well as a text corpus of words in the language +are necessary to determine legal onsets and subsequently syllabify words. + +The legality principle with onset maximization is a universal syllabification algorithm, +but that does not mean it performs equally across languages. Bartlett et al. (2009) +is a good benchmark for English accuracy if utilizing IPA (pg. 311). + +References: + +- Otto Jespersen. 1904. Lehrbuch der Phonetik. + Leipzig, Teubner. Chapter 13, Silbe, pp. 185-203. 
+- Theo Vennemann, ''On the Theory of Syllabic Phonology,'' 1972, p. 11. +- Daniel Kahn, ''Syllable-based generalizations in English phonology'', (PhD diss., MIT, 1976). +- Elisabeth Selkirk. 1984. On the major class features and syllable theory. + In Aronoff & Oehrle (eds.) Language Sound Structure: Studies in Phonology. + Cambridge, MIT Press. pp. 107-136. +- Jeremy Goslin and Ulrich Frauenfelder. 2001. A comparison of theoretical and human syllabification. Language and Speech, 44:409–436. +- Susan Bartlett, et al. 2009. On the Syllabification of Phonemes. + In HLT-NAACL. pp. 308-316. +- Christopher Hench. 2017. Resonances in Middle High German: New Methodologies in Prosody. UC Berkeley. +""" + +from collections import Counter + +from nltk.tokenize.api import TokenizerI + + +class LegalitySyllableTokenizer(TokenizerI): + """ + Syllabifies words based on the Legality Principle and Onset Maximization. + + >>> from nltk.tokenize import LegalitySyllableTokenizer + >>> from nltk import word_tokenize + >>> from nltk.corpus import words + >>> text = "This is a wonderful sentence." + >>> text_words = word_tokenize(text) + >>> LP = LegalitySyllableTokenizer(words.words()) + >>> [LP.tokenize(word) for word in text_words] + [['This'], ['is'], ['a'], ['won', 'der', 'ful'], ['sen', 'ten', 'ce'], ['.']] + """ + + def __init__( + self, tokenized_source_text, vowels="aeiouy", legal_frequency_threshold=0.001 + ): + """ + :param tokenized_source_text: List of valid tokens in the language + :type tokenized_source_text: list(str) + :param vowels: Valid vowels in language or IPA representation + :type vowels: str + :param legal_frequency_threshold: Lowest frequency of all onsets to be considered a legal onset + :type legal_frequency_threshold: float + """ + self.legal_frequency_threshold = legal_frequency_threshold + self.vowels = vowels + self.legal_onsets = self.find_legal_onsets(tokenized_source_text) + + def find_legal_onsets(self, words): + """ + Gathers all onsets and then return only those above the frequency threshold + + :param words: List of words in a language + :type words: list(str) + :return: Set of legal onsets + :rtype: set(str) + """ + onsets = [self.onset(word) for word in words] + legal_onsets = [ + k + for k, v in Counter(onsets).items() + if (v / len(onsets)) > self.legal_frequency_threshold + ] + return set(legal_onsets) + + def onset(self, word): + """ + Returns consonant cluster of word, i.e. all characters until the first vowel. + + :param word: Single word or token + :type word: str + :return: String of characters of onset + :rtype: str + """ + onset = "" + for c in word.lower(): + if c in self.vowels: + return onset + else: + onset += c + return onset + + def tokenize(self, token): + """ + Apply the Legality Principle in combination with + Onset Maximization to return a list of syllables. + + :param token: Single word or token + :type token: str + :return syllable_list: Single word or token broken up into syllables. 
+ :rtype: list(str) + """ + syllables = [] + syllable, current_onset = "", "" + vowel, onset = False, False + for char in token[::-1]: + char_lower = char.lower() + if not vowel: + syllable += char + vowel = bool(char_lower in self.vowels) + else: + if char_lower + current_onset[::-1] in self.legal_onsets: + syllable += char + current_onset += char_lower + onset = True + elif char_lower in self.vowels and not onset: + syllable += char + current_onset += char_lower + else: + syllables.append(syllable) + syllable = char + current_onset = "" + vowel = bool(char_lower in self.vowels) + syllables.append(syllable) + syllables_ordered = [syllable[::-1] for syllable in syllables][::-1] + return syllables_ordered diff --git a/lib/python3.10/site-packages/nltk/tokenize/nist.py b/lib/python3.10/site-packages/nltk/tokenize/nist.py new file mode 100644 index 0000000000000000000000000000000000000000..b9e13dad28b81d91891a838d89bcdf5a0c1ad086 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tokenize/nist.py @@ -0,0 +1,179 @@ +# Natural Language Toolkit: Python port of the mteval-v14.pl tokenizer. +# +# Copyright (C) 2001-2015 NLTK Project +# Author: Liling Tan (ported from ftp://jaguar.ncsl.nist.gov/mt/resources/mteval-v14.pl) +# Contributors: Ozan Caglayan, Wiktor Stribizew +# +# URL: +# For license information, see LICENSE.TXT + +""" +This is a NLTK port of the tokenizer used in the NIST BLEU evaluation script, +https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L926 +which was also ported into Python in +https://github.com/lium-lst/nmtpy/blob/master/nmtpy/metrics/mtevalbleu.py#L162 +""" + + +import io +import re + +from nltk.corpus import perluniprops +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.util import xml_unescape + + +class NISTTokenizer(TokenizerI): + """ + This NIST tokenizer is sentence-based instead of the original + paragraph-based tokenization from mteval-14.pl; The sentence-based + tokenization is consistent with the other tokenizers available in NLTK. + + >>> from nltk.tokenize.nist import NISTTokenizer + >>> nist = NISTTokenizer() + >>> s = "Good muffins cost $3.88 in New York." + >>> expected_lower = [u'good', u'muffins', u'cost', u'$', u'3.88', u'in', u'new', u'york', u'.'] + >>> expected_cased = [u'Good', u'muffins', u'cost', u'$', u'3.88', u'in', u'New', u'York', u'.'] + >>> nist.tokenize(s, lowercase=False) == expected_cased + True + >>> nist.tokenize(s, lowercase=True) == expected_lower # Lowercased. + True + + The international_tokenize() is the preferred function when tokenizing + non-european text, e.g. + + >>> from nltk.tokenize.nist import NISTTokenizer + >>> nist = NISTTokenizer() + + # Input strings. + >>> albb = u'Alibaba Group Holding Limited (Chinese: 阿里巴巴集团控股 有限公司) us a Chinese e-commerce company...' + >>> amz = u'Amazon.com, Inc. (/ˈæməzɒn/) is an American electronic commerce...' + >>> rkt = u'Rakuten, Inc. (楽天株式会社 Rakuten Kabushiki-gaisha) is a Japanese electronic commerce and Internet company based in Tokyo.' + + # Expected tokens. 
+ >>> expected_albb = [u'Alibaba', u'Group', u'Holding', u'Limited', u'(', u'Chinese', u':', u'\u963f\u91cc\u5df4\u5df4\u96c6\u56e2\u63a7\u80a1', u'\u6709\u9650\u516c\u53f8', u')'] + >>> expected_amz = [u'Amazon', u'.', u'com', u',', u'Inc', u'.', u'(', u'/', u'\u02c8\xe6', u'm'] + >>> expected_rkt = [u'Rakuten', u',', u'Inc', u'.', u'(', u'\u697d\u5929\u682a\u5f0f\u4f1a\u793e', u'Rakuten', u'Kabushiki', u'-', u'gaisha'] + + >>> nist.international_tokenize(albb)[:10] == expected_albb + True + >>> nist.international_tokenize(amz)[:10] == expected_amz + True + >>> nist.international_tokenize(rkt)[:10] == expected_rkt + True + + # Doctest for patching issue #1926 + >>> sent = u'this is a foo\u2604sentence.' + >>> expected_sent = [u'this', u'is', u'a', u'foo', u'\u2604', u'sentence', u'.'] + >>> nist.international_tokenize(sent) == expected_sent + True + """ + + # Strip "skipped" tags + STRIP_SKIP = re.compile(""), "" + # Strip end-of-line hyphenation and join lines + STRIP_EOL_HYPHEN = re.compile("\u2028"), " " + # Tokenize punctuation. + PUNCT = re.compile(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])"), " \\1 " + # Tokenize period and comma unless preceded by a digit. + PERIOD_COMMA_PRECEED = re.compile(r"([^0-9])([\.,])"), "\\1 \\2 " + # Tokenize period and comma unless followed by a digit. + PERIOD_COMMA_FOLLOW = re.compile(r"([\.,])([^0-9])"), " \\1 \\2" + # Tokenize dash when preceded by a digit + DASH_PRECEED_DIGIT = re.compile("([0-9])(-)"), "\\1 \\2 " + + LANG_DEPENDENT_REGEXES = [ + PUNCT, + PERIOD_COMMA_PRECEED, + PERIOD_COMMA_FOLLOW, + DASH_PRECEED_DIGIT, + ] + + # Perluniprops characters used in NIST tokenizer. + pup_number = str("".join(set(perluniprops.chars("Number")))) # i.e. \p{N} + pup_punct = str("".join(set(perluniprops.chars("Punctuation")))) # i.e. \p{P} + pup_symbol = str("".join(set(perluniprops.chars("Symbol")))) # i.e. \p{S} + + # Python regexes needs to escape some special symbols, see + # see https://stackoverflow.com/q/45670950/610569 + number_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_number) + punct_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_punct) + symbol_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_symbol) + + # Note: In the original perl implementation, \p{Z} and \p{Zl} were used to + # (i) strip trailing and heading spaces and + # (ii) de-deuplicate spaces. + # In Python, this would do: ' '.join(str.strip().split()) + # Thus, the next two lines were commented out. + # Line_Separator = str(''.join(perluniprops.chars('Line_Separator'))) # i.e. \p{Zl} + # Separator = str(''.join(perluniprops.chars('Separator'))) # i.e. \p{Z} + + # Pads non-ascii strings with space. + NONASCII = re.compile("([\x00-\x7f]+)"), r" \1 " + # Tokenize any punctuation unless followed AND preceded by a digit. + PUNCT_1 = ( + re.compile(f"([{number_regex}])([{punct_regex}])"), + "\\1 \\2 ", + ) + PUNCT_2 = ( + re.compile(f"([{punct_regex}])([{number_regex}])"), + " \\1 \\2", + ) + # Tokenize symbols + SYMBOLS = re.compile(f"([{symbol_regex}])"), " \\1 " + + INTERNATIONAL_REGEXES = [NONASCII, PUNCT_1, PUNCT_2, SYMBOLS] + + def lang_independent_sub(self, text): + """Performs the language independent string substituitions.""" + # It's a strange order of regexes. + # It'll be better to unescape after STRIP_EOL_HYPHEN + # but let's keep it close to the original NIST implementation. 
+ regexp, substitution = self.STRIP_SKIP + text = regexp.sub(substitution, text) + text = xml_unescape(text) + regexp, substitution = self.STRIP_EOL_HYPHEN + text = regexp.sub(substitution, text) + return text + + def tokenize(self, text, lowercase=False, western_lang=True, return_str=False): + text = str(text) + # Language independent regex. + text = self.lang_independent_sub(text) + # Language dependent regex. + if western_lang: + # Pad string with whitespace. + text = " " + text + " " + if lowercase: + text = text.lower() + for regexp, substitution in self.LANG_DEPENDENT_REGEXES: + text = regexp.sub(substitution, text) + # Remove contiguous whitespaces. + text = " ".join(text.split()) + # Finally, strips heading and trailing spaces + # and converts output string into unicode. + text = str(text.strip()) + return text if return_str else text.split() + + def international_tokenize( + self, text, lowercase=False, split_non_ascii=True, return_str=False + ): + text = str(text) + # Different from the 'normal' tokenize(), STRIP_EOL_HYPHEN is applied + # first before unescaping. + regexp, substitution = self.STRIP_SKIP + text = regexp.sub(substitution, text) + regexp, substitution = self.STRIP_EOL_HYPHEN + text = regexp.sub(substitution, text) + text = xml_unescape(text) + + if lowercase: + text = text.lower() + + for regexp, substitution in self.INTERNATIONAL_REGEXES: + text = regexp.sub(substitution, text) + + # Make sure that there's only one space only between words. + # Strip leading and trailing spaces. + text = " ".join(text.strip().split()) + return text if return_str else text.split() diff --git a/lib/python3.10/site-packages/nltk/tokenize/punkt.py b/lib/python3.10/site-packages/nltk/tokenize/punkt.py new file mode 100644 index 0000000000000000000000000000000000000000..129bd49c270c301d97a44eec5e58d7e19f15cabe --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tokenize/punkt.py @@ -0,0 +1,1767 @@ +# Natural Language Toolkit: Punkt sentence tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Algorithm: Kiss & Strunk (2006) +# Author: Willy (original Python port) +# Steven Bird (additions) +# Edward Loper (rewrite) +# Joel Nothman (almost rewrite) +# Arthur Darcet (fixes) +# Tom Aarsen <> (tackle ReDoS & performance issues) +# URL: +# For license information, see LICENSE.TXT + +r""" +Punkt Sentence Tokenizer + +This tokenizer divides a text into a list of sentences +by using an unsupervised algorithm to build a model for abbreviation +words, collocations, and words that start sentences. It must be +trained on a large collection of plaintext in the target language +before it can be used. + +The NLTK data package includes a pre-trained Punkt tokenizer for +English. + + >>> import nltk.data + >>> text = ''' + ... Punkt knows that the periods in Mr. Smith and Johann S. Bach + ... do not mark sentence boundaries. And sometimes sentences + ... can start with non-capitalized words. i is a good variable + ... name. + ... ''' + >>> sent_detector = nltk.data.load('tokenizers/punkt/english.pickle') + >>> print('\n-----\n'.join(sent_detector.tokenize(text.strip()))) + Punkt knows that the periods in Mr. Smith and Johann S. Bach + do not mark sentence boundaries. + ----- + And sometimes sentences + can start with non-capitalized words. + ----- + i is a good variable + name. + +(Note that whitespace from the original text, including newlines, is +retained in the output.) + +Punctuation following sentences is also included by default +(from NLTK 3.0 onwards). 
It can be excluded with the realign_boundaries +flag. + + >>> text = ''' + ... (How does it deal with this parenthesis?) "It should be part of the + ... previous sentence." "(And the same with this one.)" ('And this one!') + ... "('(And (this)) '?)" [(and this. )] + ... ''' + >>> print('\n-----\n'.join( + ... sent_detector.tokenize(text.strip()))) + (How does it deal with this parenthesis?) + ----- + "It should be part of the + previous sentence." + ----- + "(And the same with this one.)" + ----- + ('And this one!') + ----- + "('(And (this)) '?)" + ----- + [(and this. )] + >>> print('\n-----\n'.join( + ... sent_detector.tokenize(text.strip(), realign_boundaries=False))) + (How does it deal with this parenthesis? + ----- + ) "It should be part of the + previous sentence. + ----- + " "(And the same with this one. + ----- + )" ('And this one! + ----- + ') + "('(And (this)) '? + ----- + )" [(and this. + ----- + )] + +However, Punkt is designed to learn parameters (a list of abbreviations, etc.) +unsupervised from a corpus similar to the target domain. The pre-packaged models +may therefore be unsuitable: use ``PunktSentenceTokenizer(text)`` to learn +parameters from the given text. + +:class:`.PunktTrainer` learns parameters such as a list of abbreviations +(without supervision) from portions of text. Using a ``PunktTrainer`` directly +allows for incremental training and modification of the hyper-parameters used +to decide what is considered an abbreviation, etc. + +The algorithm for this tokenizer is described in:: + + Kiss, Tibor and Strunk, Jan (2006): Unsupervised Multilingual Sentence + Boundary Detection. Computational Linguistics 32: 485-525. +""" + +# TODO: Make orthographic heuristic less susceptible to overtraining +# TODO: Frequent sentence starters optionally exclude always-capitalised words +# FIXME: Problem with ending string with e.g. '!!!' -> '!! !' + +import math +import re +import string +from collections import defaultdict +from typing import Any, Dict, Iterator, List, Match, Optional, Tuple, Union + +from nltk.probability import FreqDist +from nltk.tokenize.api import TokenizerI + +###################################################################### +# { Orthographic Context Constants +###################################################################### +# The following constants are used to describe the orthographic +# contexts in which a word can occur. BEG=beginning, MID=middle, +# UNK=unknown, UC=uppercase, LC=lowercase, NC=no case. 
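A small illustrative sketch (variable names hypothetical) of how bit flags like the constants defined below are accumulated and queried; PunktParameters.add_ortho_context later in this file ORs flags together in exactly this way:

from collections import defaultdict

BEG_UC = 1 << 1   # seen sentence-initially with upper case
MID_UC = 1 << 2   # seen sentence-internally with upper case
BEG_LC = 1 << 4   # seen sentence-initially with lower case

ortho_context = defaultdict(int)

def add_context(word_type, flag):
    # OR the new observation into the running bit mask for this word type.
    ortho_context[word_type] |= flag

add_context("bach", BEG_UC)
add_context("bach", MID_UC)

# Membership tests are bitwise ANDs against the accumulated mask.
print(bool(ortho_context["bach"] & MID_UC))   # True
print(bool(ortho_context["bach"] & BEG_LC))   # False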
+ +_ORTHO_BEG_UC = 1 << 1 +"""Orthographic context: beginning of a sentence with upper case.""" + +_ORTHO_MID_UC = 1 << 2 +"""Orthographic context: middle of a sentence with upper case.""" + +_ORTHO_UNK_UC = 1 << 3 +"""Orthographic context: unknown position in a sentence with upper case.""" + +_ORTHO_BEG_LC = 1 << 4 +"""Orthographic context: beginning of a sentence with lower case.""" + +_ORTHO_MID_LC = 1 << 5 +"""Orthographic context: middle of a sentence with lower case.""" + +_ORTHO_UNK_LC = 1 << 6 +"""Orthographic context: unknown position in a sentence with lower case.""" + +_ORTHO_UC = _ORTHO_BEG_UC + _ORTHO_MID_UC + _ORTHO_UNK_UC +"""Orthographic context: occurs with upper case.""" + +_ORTHO_LC = _ORTHO_BEG_LC + _ORTHO_MID_LC + _ORTHO_UNK_LC +"""Orthographic context: occurs with lower case.""" + +_ORTHO_MAP = { + ("initial", "upper"): _ORTHO_BEG_UC, + ("internal", "upper"): _ORTHO_MID_UC, + ("unknown", "upper"): _ORTHO_UNK_UC, + ("initial", "lower"): _ORTHO_BEG_LC, + ("internal", "lower"): _ORTHO_MID_LC, + ("unknown", "lower"): _ORTHO_UNK_LC, +} +"""A map from context position and first-letter case to the +appropriate orthographic context flag.""" + +# } (end orthographic context constants) +###################################################################### + +###################################################################### +# { Decision reasons for debugging +###################################################################### + +REASON_DEFAULT_DECISION = "default decision" +REASON_KNOWN_COLLOCATION = "known collocation (both words)" +REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC = "abbreviation + orthographic heuristic" +REASON_ABBR_WITH_SENTENCE_STARTER = "abbreviation + frequent sentence starter" +REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC = "initial + orthographic heuristic" +REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC = "initial + orthographic heuristic" +REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC = ( + "initial + special orthographic heuristic" +) + + +# } (end decision reasons for debugging) +###################################################################### + +###################################################################### +# { Language-dependent variables +###################################################################### + + +class PunktLanguageVars: + """ + Stores variables, mostly regular expressions, which may be + language-dependent for correct application of the algorithm. + An extension of this class may modify its properties to suit + a language other than English; an instance can then be passed + as an argument to PunktSentenceTokenizer and PunktTrainer + constructors. + """ + + __slots__ = ("_re_period_context", "_re_word_tokenizer") + + def __getstate__(self): + # All modifications to the class are performed by inheritance. + # Non-default parameters to be pickled must be defined in the inherited + # class. + return 1 + + def __setstate__(self, state): + return 1 + + sent_end_chars = (".", "?", "!") + """Characters which are candidates for sentence boundaries""" + + @property + def _re_sent_end_chars(self): + return "[%s]" % re.escape("".join(self.sent_end_chars)) + + internal_punctuation = ",:;" # might want to extend this.. 
+ """sentence internal punctuation, which indicates an abbreviation if + preceded by a period-final token.""" + + re_boundary_realignment = re.compile(r'["\')\]}]+?(?:\s+|(?=--)|$)', re.MULTILINE) + """Used to realign punctuation that should be included in a sentence + although it follows the period (or ?, !).""" + + _re_word_start = r"[^\(\"\`{\[:;&\#\*@\)}\]\-,]" + """Excludes some characters from starting word tokens""" + + @property + def _re_non_word_chars(self): + return r"(?:[)\";}\]\*:@\'\({\[%s])" % re.escape( + "".join(set(self.sent_end_chars) - {"."}) + ) + + """Characters that cannot appear within words""" + + _re_multi_char_punct = r"(?:\-{2,}|\.{2,}|(?:\.\s){2,}\.)" + """Hyphen and ellipsis are multi-character punctuation""" + + _word_tokenize_fmt = r"""( + %(MultiChar)s + | + (?=%(WordStart)s)\S+? # Accept word characters until end is found + (?= # Sequences marking a word's end + \s| # White-space + $| # End-of-string + %(NonWord)s|%(MultiChar)s| # Punctuation + ,(?=$|\s|%(NonWord)s|%(MultiChar)s) # Comma if at end of word + ) + | + \S + )""" + """Format of a regular expression to split punctuation from words, + excluding period.""" + + def _word_tokenizer_re(self): + """Compiles and returns a regular expression for word tokenization""" + try: + return self._re_word_tokenizer + except AttributeError: + self._re_word_tokenizer = re.compile( + self._word_tokenize_fmt + % { + "NonWord": self._re_non_word_chars, + "MultiChar": self._re_multi_char_punct, + "WordStart": self._re_word_start, + }, + re.UNICODE | re.VERBOSE, + ) + return self._re_word_tokenizer + + def word_tokenize(self, s): + """Tokenize a string to split off punctuation other than periods""" + return self._word_tokenizer_re().findall(s) + + _period_context_fmt = r""" + %(SentEndChars)s # a potential sentence ending + (?=(?P + %(NonWord)s # either other punctuation + | + \s+(?P\S+) # or whitespace and some other token + ))""" + """Format of a regular expression to find contexts including possible + sentence boundaries. Matches token which the possible sentence boundary + ends, and matches the following token within a lookahead expression.""" + + def period_context_re(self): + """Compiles and returns a regular expression to find contexts + including possible sentence boundaries.""" + try: + return self._re_period_context + except: + self._re_period_context = re.compile( + self._period_context_fmt + % { + "NonWord": self._re_non_word_chars, + "SentEndChars": self._re_sent_end_chars, + }, + re.UNICODE | re.VERBOSE, + ) + return self._re_period_context + + +_re_non_punct = re.compile(r"[^\W\d]", re.UNICODE) +"""Matches token types that are not merely punctuation. (Types for +numeric tokens are changed to ##number## and hence contain alpha.)""" + + +# } +###################################################################### + + +# //////////////////////////////////////////////////////////// +# { Helper Functions +# //////////////////////////////////////////////////////////// + + +def _pair_iter(iterator): + """ + Yields pairs of tokens from the given iterator such that each input + token will appear as the first element in a yielded tuple. The last + pair will have None as its second element. 
+ """ + iterator = iter(iterator) + try: + prev = next(iterator) + except StopIteration: + return + for el in iterator: + yield (prev, el) + prev = el + yield (prev, None) + + +###################################################################### +# { Punkt Parameters +###################################################################### + + +class PunktParameters: + """Stores data used to perform sentence boundary detection with Punkt.""" + + def __init__(self): + self.abbrev_types = set() + """A set of word types for known abbreviations.""" + + self.collocations = set() + """A set of word type tuples for known common collocations + where the first word ends in a period. E.g., ('S.', 'Bach') + is a common collocation in a text that discusses 'Johann + S. Bach'. These count as negative evidence for sentence + boundaries.""" + + self.sent_starters = set() + """A set of word types for words that often appear at the + beginning of sentences.""" + + self.ortho_context = defaultdict(int) + """A dictionary mapping word types to the set of orthographic + contexts that word type appears in. Contexts are represented + by adding orthographic context flags: ...""" + + def clear_abbrevs(self): + self.abbrev_types = set() + + def clear_collocations(self): + self.collocations = set() + + def clear_sent_starters(self): + self.sent_starters = set() + + def clear_ortho_context(self): + self.ortho_context = defaultdict(int) + + def add_ortho_context(self, typ, flag): + self.ortho_context[typ] |= flag + + def _debug_ortho_context(self, typ): + context = self.ortho_context[typ] + if context & _ORTHO_BEG_UC: + yield "BEG-UC" + if context & _ORTHO_MID_UC: + yield "MID-UC" + if context & _ORTHO_UNK_UC: + yield "UNK-UC" + if context & _ORTHO_BEG_LC: + yield "BEG-LC" + if context & _ORTHO_MID_LC: + yield "MID-LC" + if context & _ORTHO_UNK_LC: + yield "UNK-LC" + + +###################################################################### +# { PunktToken +###################################################################### + + +class PunktToken: + """Stores a token of text with annotations produced during + sentence boundary detection.""" + + _properties = ["parastart", "linestart", "sentbreak", "abbr", "ellipsis"] + __slots__ = ["tok", "type", "period_final"] + _properties + + def __init__(self, tok, **params): + self.tok = tok + self.type = self._get_type(tok) + self.period_final = tok.endswith(".") + + for prop in self._properties: + setattr(self, prop, None) + for k in params: + setattr(self, k, params[k]) + + # //////////////////////////////////////////////////////////// + # { Regular expressions for properties + # //////////////////////////////////////////////////////////// + # Note: [A-Za-z] is approximated by [^\W\d] in the general case. + _RE_ELLIPSIS = re.compile(r"\.\.+$") + _RE_NUMERIC = re.compile(r"^-?[\.,]?\d[\d,\.-]*\.?$") + _RE_INITIAL = re.compile(r"[^\W\d]\.$", re.UNICODE) + _RE_ALPHA = re.compile(r"[^\W\d]+$", re.UNICODE) + + # //////////////////////////////////////////////////////////// + # { Derived properties + # //////////////////////////////////////////////////////////// + + def _get_type(self, tok): + """Returns a case-normalized representation of the token.""" + return self._RE_NUMERIC.sub("##number##", tok.lower()) + + @property + def type_no_period(self): + """ + The type with its final period removed if it has one. 
+        """
+        if len(self.type) > 1 and self.type[-1] == ".":
+            return self.type[:-1]
+        return self.type
+
+    @property
+    def type_no_sentperiod(self):
+        """
+        The type with its final period removed if it is marked as a
+        sentence break.
+        """
+        if self.sentbreak:
+            return self.type_no_period
+        return self.type
+
+    @property
+    def first_upper(self):
+        """True if the token's first character is uppercase."""
+        return self.tok[0].isupper()
+
+    @property
+    def first_lower(self):
+        """True if the token's first character is lowercase."""
+        return self.tok[0].islower()
+
+    @property
+    def first_case(self):
+        if self.first_lower:
+            return "lower"
+        if self.first_upper:
+            return "upper"
+        return "none"
+
+    @property
+    def is_ellipsis(self):
+        """True if the token text is that of an ellipsis."""
+        return self._RE_ELLIPSIS.match(self.tok)
+
+    @property
+    def is_number(self):
+        """True if the token text is that of a number."""
+        return self.type.startswith("##number##")
+
+    @property
+    def is_initial(self):
+        """True if the token text is that of an initial."""
+        return self._RE_INITIAL.match(self.tok)
+
+    @property
+    def is_alpha(self):
+        """True if the token text is all alphabetic."""
+        return self._RE_ALPHA.match(self.tok)
+
+    @property
+    def is_non_punct(self):
+        """True if the token is either a number or is alphabetic."""
+        return _re_non_punct.search(self.type)
+
+    # ////////////////////////////////////////////////////////////
+    # { String representation
+    # ////////////////////////////////////////////////////////////
+
+    def __repr__(self):
+        """
+        A string representation of the token that can reproduce it
+        with eval(), which lists all the token's non-default
+        annotations.
+        """
+        typestr = " type=%s," % repr(self.type) if self.type != self.tok else ""
+
+        propvals = ", ".join(
+            f"{p}={repr(getattr(self, p))}"
+            for p in self._properties
+            if getattr(self, p)
+        )
+
+        return "{}({},{} {})".format(
+            self.__class__.__name__,
+            repr(self.tok),
+            typestr,
+            propvals,
+        )
+
+    def __str__(self):
+        """
+        A string representation akin to that used by Kiss and Strunk.
+        """
+        res = self.tok
+        if self.abbr:
+            res += "<A>"
+        if self.ellipsis:
+            res += "<E>"
+        if self.sentbreak:
+            res += "<S>"
+        return res
+
+
+######################################################################
+# { Punkt base class
+######################################################################
+
+
+class PunktBaseClass:
+    """
+    Includes common components of PunktTrainer and PunktSentenceTokenizer.
+    """
+
+    def __init__(self, lang_vars=None, token_cls=PunktToken, params=None):
+        if lang_vars is None:
+            lang_vars = PunktLanguageVars()
+        if params is None:
+            params = PunktParameters()
+        self._params = params
+        self._lang_vars = lang_vars
+        self._Token = token_cls
+        """The collection of parameters that determines the behavior
+        of the punkt tokenizer."""
+
+    # ////////////////////////////////////////////////////////////
+    # { Word tokenization
+    # ////////////////////////////////////////////////////////////
+
+    def _tokenize_words(self, plaintext):
+        """
+        Divide the given text into tokens, using the punkt word
+        segmentation regular expression, and generate the resulting tokens
+        augmented with markers for whether they occur at the start of a
+        paragraph or a new line, respectively.
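+
+        For example (an illustrative sketch): given the input "One two."
+        followed by a blank line and then "Three.", the token for "One" is
+        yielded with ``linestart=True``, "two." is yielded with no flags set,
+        and "Three." is yielded with both ``parastart=True`` and
+        ``linestart=True``.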
+ """ + parastart = False + for line in plaintext.split("\n"): + if line.strip(): + line_toks = iter(self._lang_vars.word_tokenize(line)) + + try: + tok = next(line_toks) + except StopIteration: + continue + + yield self._Token(tok, parastart=parastart, linestart=True) + parastart = False + + for tok in line_toks: + yield self._Token(tok) + else: + parastart = True + + # //////////////////////////////////////////////////////////// + # { Annotation Procedures + # //////////////////////////////////////////////////////////// + + def _annotate_first_pass( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Perform the first pass of annotation, which makes decisions + based purely based on the word type of each word: + + - '?', '!', and '.' are marked as sentence breaks. + - sequences of two or more periods are marked as ellipsis. + - any word ending in '.' that's a known abbreviation is + marked as an abbreviation. + - any other word ending in '.' is marked as a sentence break. + + Return these annotations as a tuple of three sets: + + - sentbreak_toks: The indices of all sentence breaks. + - abbrev_toks: The indices of all abbreviations. + - ellipsis_toks: The indices of all ellipsis marks. + """ + for aug_tok in tokens: + self._first_pass_annotation(aug_tok) + yield aug_tok + + def _first_pass_annotation(self, aug_tok: PunktToken) -> None: + """ + Performs type-based annotation on a single token. + """ + + tok = aug_tok.tok + + if tok in self._lang_vars.sent_end_chars: + aug_tok.sentbreak = True + elif aug_tok.is_ellipsis: + aug_tok.ellipsis = True + elif aug_tok.period_final and not tok.endswith(".."): + if ( + tok[:-1].lower() in self._params.abbrev_types + or tok[:-1].lower().split("-")[-1] in self._params.abbrev_types + ): + + aug_tok.abbr = True + else: + aug_tok.sentbreak = True + + return + + +###################################################################### +# { Punkt Trainer +###################################################################### + + +class PunktTrainer(PunktBaseClass): + """Learns parameters used in Punkt sentence boundary detection.""" + + def __init__( + self, train_text=None, verbose=False, lang_vars=None, token_cls=PunktToken + ): + + PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls) + + self._type_fdist = FreqDist() + """A frequency distribution giving the frequency of each + case-normalized token type in the training data.""" + + self._num_period_toks = 0 + """The number of words ending in period in the training data.""" + + self._collocation_fdist = FreqDist() + """A frequency distribution giving the frequency of all + bigrams in the training data where the first word ends in a + period. Bigrams are encoded as tuples of word types. + Especially common collocations are extracted from this + frequency distribution, and stored in + ``_params``.``collocations ``.""" + + self._sent_starter_fdist = FreqDist() + """A frequency distribution giving the frequency of all words + that occur at the training data at the beginning of a sentence + (after the first pass of annotation). Especially common + sentence starters are extracted from this frequency + distribution, and stored in ``_params.sent_starters``. 
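+
+        Keys are case-normalized token types; roughly speaking, a count is
+        incremented in ``_train_tokens()`` whenever an alphabetic token
+        directly follows a token marked as a likely sentence break.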
+ """ + + self._sentbreak_count = 0 + """The total number of sentence breaks identified in training, used for + calculating the frequent sentence starter heuristic.""" + + self._finalized = True + """A flag as to whether the training has been finalized by finding + collocations and sentence starters, or whether finalize_training() + still needs to be called.""" + + if train_text: + self.train(train_text, verbose, finalize=True) + + def get_params(self): + """ + Calculates and returns parameters for sentence boundary detection as + derived from training.""" + if not self._finalized: + self.finalize_training() + return self._params + + # //////////////////////////////////////////////////////////// + # { Customization Variables + # //////////////////////////////////////////////////////////// + + ABBREV = 0.3 + """cut-off value whether a 'token' is an abbreviation""" + + IGNORE_ABBREV_PENALTY = False + """allows the disabling of the abbreviation penalty heuristic, which + exponentially disadvantages words that are found at times without a + final period.""" + + ABBREV_BACKOFF = 5 + """upper cut-off for Mikheev's(2002) abbreviation detection algorithm""" + + COLLOCATION = 7.88 + """minimal log-likelihood value that two tokens need to be considered + as a collocation""" + + SENT_STARTER = 30 + """minimal log-likelihood value that a token requires to be considered + as a frequent sentence starter""" + + INCLUDE_ALL_COLLOCS = False + """this includes as potential collocations all word pairs where the first + word ends in a period. It may be useful in corpora where there is a lot + of variation that makes abbreviations like Mr difficult to identify.""" + + INCLUDE_ABBREV_COLLOCS = False + """this includes as potential collocations all word pairs where the first + word is an abbreviation. Such collocations override the orthographic + heuristic, but not the sentence starter heuristic. This is overridden by + INCLUDE_ALL_COLLOCS, and if both are false, only collocations with initials + and ordinals are considered.""" + """""" + + MIN_COLLOC_FREQ = 1 + """this sets a minimum bound on the number of times a bigram needs to + appear before it can be considered a collocation, in addition to log + likelihood statistics. This is useful when INCLUDE_ALL_COLLOCS is True.""" + + # //////////////////////////////////////////////////////////// + # { Training.. + # //////////////////////////////////////////////////////////// + + def train(self, text, verbose=False, finalize=True): + """ + Collects training data from a given text. If finalize is True, it + will determine all the parameters for sentence boundary detection. If + not, this will be delayed until get_params() or finalize_training() is + called. If verbose is True, abbreviations found will be listed. + """ + # Break the text into tokens; record which token indices correspond to + # line starts and paragraph starts; and determine their types. + self._train_tokens(self._tokenize_words(text), verbose) + if finalize: + self.finalize_training(verbose) + + def train_tokens(self, tokens, verbose=False, finalize=True): + """ + Collects training data from a given list of tokens. + """ + self._train_tokens((self._Token(t) for t in tokens), verbose) + if finalize: + self.finalize_training(verbose) + + def _train_tokens(self, tokens, verbose): + self._finalized = False + + # Ensure tokens are a list + tokens = list(tokens) + + # Find the frequency of each case-normalized type. (Don't + # strip off final periods.) 
Also keep track of the number of + # tokens that end in periods. + for aug_tok in tokens: + self._type_fdist[aug_tok.type] += 1 + if aug_tok.period_final: + self._num_period_toks += 1 + + # Look for new abbreviations, and for types that no longer are + unique_types = self._unique_types(tokens) + for abbr, score, is_add in self._reclassify_abbrev_types(unique_types): + if score >= self.ABBREV: + if is_add: + self._params.abbrev_types.add(abbr) + if verbose: + print(f" Abbreviation: [{score:6.4f}] {abbr}") + else: + if not is_add: + self._params.abbrev_types.remove(abbr) + if verbose: + print(f" Removed abbreviation: [{score:6.4f}] {abbr}") + + # Make a preliminary pass through the document, marking likely + # sentence breaks, abbreviations, and ellipsis tokens. + tokens = list(self._annotate_first_pass(tokens)) + + # Check what contexts each word type can appear in, given the + # case of its first letter. + self._get_orthography_data(tokens) + + # We need total number of sentence breaks to find sentence starters + self._sentbreak_count += self._get_sentbreak_count(tokens) + + # The remaining heuristics relate to pairs of tokens where the first + # ends in a period. + for aug_tok1, aug_tok2 in _pair_iter(tokens): + if not aug_tok1.period_final or not aug_tok2: + continue + + # Is the first token a rare abbreviation? + if self._is_rare_abbrev_type(aug_tok1, aug_tok2): + self._params.abbrev_types.add(aug_tok1.type_no_period) + if verbose: + print(" Rare Abbrev: %s" % aug_tok1.type) + + # Does second token have a high likelihood of starting a sentence? + if self._is_potential_sent_starter(aug_tok2, aug_tok1): + self._sent_starter_fdist[aug_tok2.type] += 1 + + # Is this bigram a potential collocation? + if self._is_potential_collocation(aug_tok1, aug_tok2): + self._collocation_fdist[ + (aug_tok1.type_no_period, aug_tok2.type_no_sentperiod) + ] += 1 + + def _unique_types(self, tokens): + return {aug_tok.type for aug_tok in tokens} + + def finalize_training(self, verbose=False): + """ + Uses data that has been gathered in training to determine likely + collocations and sentence starters. + """ + self._params.clear_sent_starters() + for typ, log_likelihood in self._find_sent_starters(): + self._params.sent_starters.add(typ) + if verbose: + print(f" Sent Starter: [{log_likelihood:6.4f}] {typ!r}") + + self._params.clear_collocations() + for (typ1, typ2), log_likelihood in self._find_collocations(): + self._params.collocations.add((typ1, typ2)) + if verbose: + print(f" Collocation: [{log_likelihood:6.4f}] {typ1!r}+{typ2!r}") + + self._finalized = True + + # //////////////////////////////////////////////////////////// + # { Overhead reduction + # //////////////////////////////////////////////////////////// + + def freq_threshold( + self, ortho_thresh=2, type_thresh=2, colloc_thres=2, sentstart_thresh=2 + ): + """ + Allows memory use to be reduced after much training by removing data + about rare tokens that are unlikely to have a statistical effect with + further training. Entries occurring above the given thresholds will be + retained. 
+ """ + if ortho_thresh > 1: + old_oc = self._params.ortho_context + self._params.clear_ortho_context() + for tok in self._type_fdist: + count = self._type_fdist[tok] + if count >= ortho_thresh: + self._params.ortho_context[tok] = old_oc[tok] + + self._type_fdist = self._freq_threshold(self._type_fdist, type_thresh) + self._collocation_fdist = self._freq_threshold( + self._collocation_fdist, colloc_thres + ) + self._sent_starter_fdist = self._freq_threshold( + self._sent_starter_fdist, sentstart_thresh + ) + + def _freq_threshold(self, fdist, threshold): + """ + Returns a FreqDist containing only data with counts below a given + threshold, as well as a mapping (None -> count_removed). + """ + # We assume that there is more data below the threshold than above it + # and so create a new FreqDist rather than working in place. + res = FreqDist() + num_removed = 0 + for tok in fdist: + count = fdist[tok] + if count < threshold: + num_removed += 1 + else: + res[tok] += count + res[None] += num_removed + return res + + # //////////////////////////////////////////////////////////// + # { Orthographic data + # //////////////////////////////////////////////////////////// + + def _get_orthography_data(self, tokens): + """ + Collect information about whether each token type occurs + with different case patterns (i) overall, (ii) at + sentence-initial positions, and (iii) at sentence-internal + positions. + """ + # 'initial' or 'internal' or 'unknown' + context = "internal" + tokens = list(tokens) + + for aug_tok in tokens: + # If we encounter a paragraph break, then it's a good sign + # that it's a sentence break. But err on the side of + # caution (by not positing a sentence break) if we just + # saw an abbreviation. + if aug_tok.parastart and context != "unknown": + context = "initial" + + # If we're at the beginning of a line, then we can't decide + # between 'internal' and 'initial'. + if aug_tok.linestart and context == "internal": + context = "unknown" + + # Find the case-normalized type of the token. If it's a + # sentence-final token, strip off the period. + typ = aug_tok.type_no_sentperiod + + # Update the orthographic context table. + flag = _ORTHO_MAP.get((context, aug_tok.first_case), 0) + if flag: + self._params.add_ortho_context(typ, flag) + + # Decide whether the next word is at a sentence boundary. + if aug_tok.sentbreak: + if not (aug_tok.is_number or aug_tok.is_initial): + context = "initial" + else: + context = "unknown" + elif aug_tok.ellipsis or aug_tok.abbr: + context = "unknown" + else: + context = "internal" + + # //////////////////////////////////////////////////////////// + # { Abbreviations + # //////////////////////////////////////////////////////////// + + def _reclassify_abbrev_types(self, types): + """ + (Re)classifies each given token if + - it is period-final and not a known abbreviation; or + - it is not period-final and is otherwise a known abbreviation + by checking whether its previous classification still holds according + to the heuristics of section 3. + Yields triples (abbr, score, is_add) where abbr is the type in question, + score is its log-likelihood with penalties applied, and is_add specifies + whether the present type is a candidate for inclusion or exclusion as an + abbreviation, such that: + - (is_add and score >= 0.3) suggests a new abbreviation; and + - (not is_add and score < 0.3) suggests excluding an abbreviation. 
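+
+        The 0.3 cut-off referred to above is the ``ABBREV`` class attribute;
+        callers compare the yielded score against it.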
+ """ + # (While one could recalculate abbreviations from all .-final tokens at + # every iteration, in cases requiring efficiency, the number of tokens + # in the present training document will be much less.) + + for typ in types: + # Check some basic conditions, to rule out words that are + # clearly not abbrev_types. + if not _re_non_punct.search(typ) or typ == "##number##": + continue + + if typ.endswith("."): + if typ in self._params.abbrev_types: + continue + typ = typ[:-1] + is_add = True + else: + if typ not in self._params.abbrev_types: + continue + is_add = False + + # Count how many periods & nonperiods are in the + # candidate. + num_periods = typ.count(".") + 1 + num_nonperiods = len(typ) - num_periods + 1 + + # Let be the candidate without the period, and + # be the period. Find a log likelihood ratio that + # indicates whether occurs as a single unit (high + # value of log_likelihood), or as two independent units and + # (low value of log_likelihood). + count_with_period = self._type_fdist[typ + "."] + count_without_period = self._type_fdist[typ] + log_likelihood = self._dunning_log_likelihood( + count_with_period + count_without_period, + self._num_period_toks, + count_with_period, + self._type_fdist.N(), + ) + + # Apply three scaling factors to 'tweak' the basic log + # likelihood ratio: + # F_length: long word -> less likely to be an abbrev + # F_periods: more periods -> more likely to be an abbrev + # F_penalty: penalize occurrences w/o a period + f_length = math.exp(-num_nonperiods) + f_periods = num_periods + f_penalty = int(self.IGNORE_ABBREV_PENALTY) or math.pow( + num_nonperiods, -count_without_period + ) + score = log_likelihood * f_length * f_periods * f_penalty + + yield typ, score, is_add + + def find_abbrev_types(self): + """ + Recalculates abbreviations given type frequencies, despite no prior + determination of abbreviations. + This fails to include abbreviations otherwise found as "rare". + """ + self._params.clear_abbrevs() + tokens = (typ for typ in self._type_fdist if typ and typ.endswith(".")) + for abbr, score, _is_add in self._reclassify_abbrev_types(tokens): + if score >= self.ABBREV: + self._params.abbrev_types.add(abbr) + + # This function combines the work done by the original code's + # functions `count_orthography_context`, `get_orthography_count`, + # and `get_rare_abbreviations`. + def _is_rare_abbrev_type(self, cur_tok, next_tok): + """ + A word type is counted as a rare abbreviation if... + - it's not already marked as an abbreviation + - it occurs fewer than ABBREV_BACKOFF times + - either it is followed by a sentence-internal punctuation + mark, *or* it is followed by a lower-case word that + sometimes appears with upper case, but never occurs with + lower case at the beginning of sentences. + """ + if cur_tok.abbr or not cur_tok.sentbreak: + return False + + # Find the case-normalized type of the token. If it's + # a sentence-final token, strip off the period. + typ = cur_tok.type_no_sentperiod + + # Proceed only if the type hasn't been categorized as an + # abbreviation already, and is sufficiently rare... + count = self._type_fdist[typ] + self._type_fdist[typ[:-1]] + if typ in self._params.abbrev_types or count >= self.ABBREV_BACKOFF: + return False + + # Record this token as an abbreviation if the next + # token is a sentence-internal punctuation mark. + # [XX] :1 or check the whole thing?? + if next_tok.tok[:1] in self._lang_vars.internal_punctuation: + return True + + # Record this type as an abbreviation if the next + # token... 
(i) starts with a lower case letter, + # (ii) sometimes occurs with an uppercase letter, + # and (iii) never occus with an uppercase letter + # sentence-internally. + # [xx] should the check for (ii) be modified?? + if next_tok.first_lower: + typ2 = next_tok.type_no_sentperiod + typ2ortho_context = self._params.ortho_context[typ2] + if (typ2ortho_context & _ORTHO_BEG_UC) and not ( + typ2ortho_context & _ORTHO_MID_UC + ): + return True + + # //////////////////////////////////////////////////////////// + # { Log Likelihoods + # //////////////////////////////////////////////////////////// + + # helper for _reclassify_abbrev_types: + @staticmethod + def _dunning_log_likelihood(count_a, count_b, count_ab, N): + """ + A function that calculates the modified Dunning log-likelihood + ratio scores for abbreviation candidates. The details of how + this works is available in the paper. + """ + p1 = count_b / N + p2 = 0.99 + + null_hypo = count_ab * math.log(p1) + (count_a - count_ab) * math.log(1.0 - p1) + alt_hypo = count_ab * math.log(p2) + (count_a - count_ab) * math.log(1.0 - p2) + + likelihood = null_hypo - alt_hypo + + return -2.0 * likelihood + + @staticmethod + def _col_log_likelihood(count_a, count_b, count_ab, N): + """ + A function that will just compute log-likelihood estimate, in + the original paper it's described in algorithm 6 and 7. + + This *should* be the original Dunning log-likelihood values, + unlike the previous log_l function where it used modified + Dunning log-likelihood values + """ + p = count_b / N + p1 = count_ab / count_a + try: + p2 = (count_b - count_ab) / (N - count_a) + except ZeroDivisionError: + p2 = 1 + + try: + summand1 = count_ab * math.log(p) + (count_a - count_ab) * math.log(1.0 - p) + except ValueError: + summand1 = 0 + + try: + summand2 = (count_b - count_ab) * math.log(p) + ( + N - count_a - count_b + count_ab + ) * math.log(1.0 - p) + except ValueError: + summand2 = 0 + + if count_a == count_ab or p1 <= 0 or p1 >= 1: + summand3 = 0 + else: + summand3 = count_ab * math.log(p1) + (count_a - count_ab) * math.log( + 1.0 - p1 + ) + + if count_b == count_ab or p2 <= 0 or p2 >= 1: + summand4 = 0 + else: + summand4 = (count_b - count_ab) * math.log(p2) + ( + N - count_a - count_b + count_ab + ) * math.log(1.0 - p2) + + likelihood = summand1 + summand2 - summand3 - summand4 + + return -2.0 * likelihood + + # //////////////////////////////////////////////////////////// + # { Collocation Finder + # //////////////////////////////////////////////////////////// + + def _is_potential_collocation(self, aug_tok1, aug_tok2): + """ + Returns True if the pair of tokens may form a collocation given + log-likelihood statistics. + """ + return ( + ( + self.INCLUDE_ALL_COLLOCS + or (self.INCLUDE_ABBREV_COLLOCS and aug_tok1.abbr) + or (aug_tok1.sentbreak and (aug_tok1.is_number or aug_tok1.is_initial)) + ) + and aug_tok1.is_non_punct + and aug_tok2.is_non_punct + ) + + def _find_collocations(self): + """ + Generates likely collocations and their log-likelihood. 
+ """ + for types in self._collocation_fdist: + try: + typ1, typ2 = types + except TypeError: + # types may be None after calling freq_threshold() + continue + if typ2 in self._params.sent_starters: + continue + + col_count = self._collocation_fdist[types] + typ1_count = self._type_fdist[typ1] + self._type_fdist[typ1 + "."] + typ2_count = self._type_fdist[typ2] + self._type_fdist[typ2 + "."] + if ( + typ1_count > 1 + and typ2_count > 1 + and self.MIN_COLLOC_FREQ < col_count <= min(typ1_count, typ2_count) + ): + + log_likelihood = self._col_log_likelihood( + typ1_count, typ2_count, col_count, self._type_fdist.N() + ) + # Filter out the not-so-collocative + if log_likelihood >= self.COLLOCATION and ( + self._type_fdist.N() / typ1_count > typ2_count / col_count + ): + yield (typ1, typ2), log_likelihood + + # //////////////////////////////////////////////////////////// + # { Sentence-Starter Finder + # //////////////////////////////////////////////////////////// + + def _is_potential_sent_starter(self, cur_tok, prev_tok): + """ + Returns True given a token and the token that precedes it if it + seems clear that the token is beginning a sentence. + """ + # If a token (i) is preceded by a sentece break that is + # not a potential ordinal number or initial, and (ii) is + # alphabetic, then it is a a sentence-starter. + return ( + prev_tok.sentbreak + and not (prev_tok.is_number or prev_tok.is_initial) + and cur_tok.is_alpha + ) + + def _find_sent_starters(self): + """ + Uses collocation heuristics for each candidate token to + determine if it frequently starts sentences. + """ + for typ in self._sent_starter_fdist: + if not typ: + continue + + typ_at_break_count = self._sent_starter_fdist[typ] + typ_count = self._type_fdist[typ] + self._type_fdist[typ + "."] + if typ_count < typ_at_break_count: + # needed after freq_threshold + continue + + log_likelihood = self._col_log_likelihood( + self._sentbreak_count, + typ_count, + typ_at_break_count, + self._type_fdist.N(), + ) + + if ( + log_likelihood >= self.SENT_STARTER + and self._type_fdist.N() / self._sentbreak_count + > typ_count / typ_at_break_count + ): + yield typ, log_likelihood + + def _get_sentbreak_count(self, tokens): + """ + Returns the number of sentence breaks marked in a given set of + augmented tokens. + """ + return sum(1 for aug_tok in tokens if aug_tok.sentbreak) + + +###################################################################### +# { Punkt Sentence Tokenizer +###################################################################### + + +class PunktSentenceTokenizer(PunktBaseClass, TokenizerI): + """ + A sentence tokenizer which uses an unsupervised algorithm to build + a model for abbreviation words, collocations, and words that start + sentences; and then uses that model to find sentence boundaries. + This approach has been shown to work well for many European + languages. + """ + + def __init__( + self, train_text=None, verbose=False, lang_vars=None, token_cls=PunktToken + ): + """ + train_text can either be the sole training text for this sentence + boundary detector, or can be a PunktParameters object. + """ + PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls) + + if train_text: + self._params = self.train(train_text, verbose) + + def train(self, train_text, verbose=False): + """ + Derives parameters from a given training text, or uses the parameters + given. Repeated calls to this method destroy previous parameters. For + incremental training, instantiate a separate PunktTrainer instance. 
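+
+        A minimal usage sketch (``my_training_text`` is a placeholder for any
+        reasonably large plain-text corpus)::
+
+            >>> tokenizer = PunktSentenceTokenizer(my_training_text)  # doctest: +SKIP
+            >>> tokenizer.tokenize("Dr. Watson arrived. He sat down.")  # doctest: +SKIP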
+ """ + if not isinstance(train_text, str): + return train_text + return PunktTrainer( + train_text, lang_vars=self._lang_vars, token_cls=self._Token + ).get_params() + + # //////////////////////////////////////////////////////////// + # { Tokenization + # //////////////////////////////////////////////////////////// + + def tokenize(self, text: str, realign_boundaries: bool = True) -> List[str]: + """ + Given a text, returns a list of the sentences in that text. + """ + return list(self.sentences_from_text(text, realign_boundaries)) + + def debug_decisions(self, text: str) -> Iterator[Dict[str, Any]]: + """ + Classifies candidate periods as sentence breaks, yielding a dict for + each that may be used to understand why the decision was made. + + See format_debug_decision() to help make this output readable. + """ + + for match, decision_text in self._match_potential_end_contexts(text): + tokens = self._tokenize_words(decision_text) + tokens = list(self._annotate_first_pass(tokens)) + while tokens and not tokens[0].tok.endswith(self._lang_vars.sent_end_chars): + tokens.pop(0) + yield { + "period_index": match.end() - 1, + "text": decision_text, + "type1": tokens[0].type, + "type2": tokens[1].type, + "type1_in_abbrs": bool(tokens[0].abbr), + "type1_is_initial": bool(tokens[0].is_initial), + "type2_is_sent_starter": tokens[1].type_no_sentperiod + in self._params.sent_starters, + "type2_ortho_heuristic": self._ortho_heuristic(tokens[1]), + "type2_ortho_contexts": set( + self._params._debug_ortho_context(tokens[1].type_no_sentperiod) + ), + "collocation": ( + tokens[0].type_no_sentperiod, + tokens[1].type_no_sentperiod, + ) + in self._params.collocations, + "reason": self._second_pass_annotation(tokens[0], tokens[1]) + or REASON_DEFAULT_DECISION, + "break_decision": tokens[0].sentbreak, + } + + def span_tokenize( + self, text: str, realign_boundaries: bool = True + ) -> Iterator[Tuple[int, int]]: + """ + Given a text, generates (start, end) spans of sentences + in the text. + """ + slices = self._slices_from_text(text) + if realign_boundaries: + slices = self._realign_boundaries(text, slices) + for sentence in slices: + yield (sentence.start, sentence.stop) + + def sentences_from_text( + self, text: str, realign_boundaries: bool = True + ) -> List[str]: + """ + Given a text, generates the sentences in that text by only + testing candidate sentence breaks. If realign_boundaries is + True, includes in the sentence closing punctuation that + follows the period. + """ + return [text[s:e] for s, e in self.span_tokenize(text, realign_boundaries)] + + def _get_last_whitespace_index(self, text: str) -> int: + """ + Given a text, find the index of the *last* occurrence of *any* + whitespace character, i.e. " ", "\n", "\t", "\r", etc. + If none is found, return 0. + """ + for i in range(len(text) - 1, -1, -1): + if text[i] in string.whitespace: + return i + return 0 + + def _match_potential_end_contexts(self, text: str) -> Iterator[Tuple[Match, str]]: + """ + Given a text, find the matches of potential sentence breaks, + alongside the contexts surrounding these sentence breaks. + + Since the fix for the ReDOS discovered in issue #2866, we no longer match + the word before a potential end of sentence token. Instead, we use a separate + regex for this. As a consequence, `finditer`'s desire to find non-overlapping + matches no longer aids us in finding the single longest match. + Where previously, we could use:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." 
+ >>> list(pst._lang_vars.period_context_re().finditer(text)) # doctest: +SKIP + [] + + Now we have to find the word before (i.e. 'acting') separately, and `finditer` + returns:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." + >>> list(pst._lang_vars.period_context_re().finditer(text)) # doctest: +NORMALIZE_WHITESPACE + [, + , + ] + + So, we need to find the word before the match from right to left, and then manually remove + the overlaps. That is what this method does:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." + >>> list(pst._match_potential_end_contexts(text)) + [(, 'acting!!! I')] + + :param text: String of one or more sentences + :type text: str + :return: Generator of match-context tuples. + :rtype: Iterator[Tuple[Match, str]] + """ + previous_slice = slice(0, 0) + previous_match = None + for match in self._lang_vars.period_context_re().finditer(text): + + # Get the slice of the previous word + before_text = text[previous_slice.stop : match.start()] + index_after_last_space = self._get_last_whitespace_index(before_text) + if index_after_last_space: + # + 1 to exclude the space itself + index_after_last_space += previous_slice.stop + 1 + else: + index_after_last_space = previous_slice.start + prev_word_slice = slice(index_after_last_space, match.start()) + + # If the previous slice does not overlap with this slice, then + # we can yield the previous match and slice. If there is an overlap, + # then we do not yield the previous match and slice. + if previous_match and previous_slice.stop <= prev_word_slice.start: + yield ( + previous_match, + text[previous_slice] + + previous_match.group() + + previous_match.group("after_tok"), + ) + previous_match = match + previous_slice = prev_word_slice + + # Yield the last match and context, if it exists + if previous_match: + yield ( + previous_match, + text[previous_slice] + + previous_match.group() + + previous_match.group("after_tok"), + ) + + def _slices_from_text(self, text: str) -> Iterator[slice]: + last_break = 0 + for match, context in self._match_potential_end_contexts(text): + if self.text_contains_sentbreak(context): + yield slice(last_break, match.end()) + if match.group("next_tok"): + # next sentence starts after whitespace + last_break = match.start("next_tok") + else: + # next sentence starts at following punctuation + last_break = match.end() + # The last sentence should not contain trailing whitespace. + yield slice(last_break, len(text.rstrip())) + + def _realign_boundaries( + self, text: str, slices: Iterator[slice] + ) -> Iterator[slice]: + """ + Attempts to realign punctuation that falls after the period but + should otherwise be included in the same sentence. + + For example: "(Sent1.) Sent2." will otherwise be split as:: + + ["(Sent1.", ") Sent1."]. + + This method will produce:: + + ["(Sent1.)", "Sent2."]. + """ + realign = 0 + for sentence1, sentence2 in _pair_iter(slices): + sentence1 = slice(sentence1.start + realign, sentence1.stop) + if not sentence2: + if text[sentence1]: + yield sentence1 + continue + + m = self._lang_vars.re_boundary_realignment.match(text[sentence2]) + if m: + yield slice(sentence1.start, sentence2.start + len(m.group(0).rstrip())) + realign = m.end() + else: + realign = 0 + if text[sentence1]: + yield sentence1 + + def text_contains_sentbreak(self, text: str) -> bool: + """ + Returns True if the given text includes a sentence break. 
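+
+        A break detected on the very last token is deliberately ignored, so a
+        lone fragment such as ``"etc."`` does not count as containing a
+        sentence break.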
+ """ + found = False # used to ignore last token + for tok in self._annotate_tokens(self._tokenize_words(text)): + if found: + return True + if tok.sentbreak: + found = True + return False + + def sentences_from_text_legacy(self, text: str) -> Iterator[str]: + """ + Given a text, generates the sentences in that text. Annotates all + tokens, rather than just those with possible sentence breaks. Should + produce the same results as ``sentences_from_text``. + """ + tokens = self._annotate_tokens(self._tokenize_words(text)) + return self._build_sentence_list(text, tokens) + + def sentences_from_tokens( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Given a sequence of tokens, generates lists of tokens, each list + corresponding to a sentence. + """ + tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens)) + sentence = [] + for aug_tok in tokens: + sentence.append(aug_tok.tok) + if aug_tok.sentbreak: + yield sentence + sentence = [] + if sentence: + yield sentence + + def _annotate_tokens(self, tokens: Iterator[PunktToken]) -> Iterator[PunktToken]: + """ + Given a set of tokens augmented with markers for line-start and + paragraph-start, returns an iterator through those tokens with full + annotation including predicted sentence breaks. + """ + # Make a preliminary pass through the document, marking likely + # sentence breaks, abbreviations, and ellipsis tokens. + tokens = self._annotate_first_pass(tokens) + + # Make a second pass through the document, using token context + # information to change our preliminary decisions about where + # sentence breaks, abbreviations, and ellipsis occurs. + tokens = self._annotate_second_pass(tokens) + + ## [XX] TESTING + # tokens = list(tokens) + # self.dump(tokens) + + return tokens + + def _build_sentence_list( + self, text: str, tokens: Iterator[PunktToken] + ) -> Iterator[str]: + """ + Given the original text and the list of augmented word tokens, + construct and return a tokenized list of sentence strings. + """ + # Most of the work here is making sure that we put the right + # pieces of whitespace back in all the right places. + + # Our position in the source text, used to keep track of which + # whitespace to add: + pos = 0 + + # A regular expression that finds pieces of whitespace: + white_space_regexp = re.compile(r"\s*") + + sentence = "" + for aug_tok in tokens: + tok = aug_tok.tok + + # Find the whitespace before this token, and update pos. + white_space = white_space_regexp.match(text, pos).group() + pos += len(white_space) + + # Some of the rules used by the punkt word tokenizer + # strip whitespace out of the text, resulting in tokens + # that contain whitespace in the source text. If our + # token doesn't match, see if adding whitespace helps. + # If so, then use the version with whitespace. + if text[pos : pos + len(tok)] != tok: + pat = r"\s*".join(re.escape(c) for c in tok) + m = re.compile(pat).match(text, pos) + if m: + tok = m.group() + + # Move our position pointer to the end of the token. + assert text[pos : pos + len(tok)] == tok + pos += len(tok) + + # Add this token. If it's not at the beginning of the + # sentence, then include any whitespace that separated it + # from the previous token. + if sentence: + sentence += white_space + sentence += tok + + # If we're at a sentence break, then start a new sentence. + if aug_tok.sentbreak: + yield sentence + sentence = "" + + # If the last sentence is empty, discard it. 
+ if sentence: + yield sentence + + # [XX] TESTING + def dump(self, tokens: Iterator[PunktToken]) -> None: + print("writing to /tmp/punkt.new...") + with open("/tmp/punkt.new", "w") as outfile: + for aug_tok in tokens: + if aug_tok.parastart: + outfile.write("\n\n") + elif aug_tok.linestart: + outfile.write("\n") + else: + outfile.write(" ") + + outfile.write(str(aug_tok)) + + # //////////////////////////////////////////////////////////// + # { Customization Variables + # //////////////////////////////////////////////////////////// + + PUNCTUATION = tuple(";:,.!?") + + # //////////////////////////////////////////////////////////// + # { Annotation Procedures + # //////////////////////////////////////////////////////////// + + def _annotate_second_pass( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Performs a token-based classification (section 4) over the given + tokens, making use of the orthographic heuristic (4.1.1), collocation + heuristic (4.1.2) and frequent sentence starter heuristic (4.1.3). + """ + for token1, token2 in _pair_iter(tokens): + self._second_pass_annotation(token1, token2) + yield token1 + + def _second_pass_annotation( + self, aug_tok1: PunktToken, aug_tok2: Optional[PunktToken] + ) -> Optional[str]: + """ + Performs token-based classification over a pair of contiguous tokens + updating the first. + """ + # Is it the last token? We can't do anything then. + if not aug_tok2: + return + + if not aug_tok1.period_final: + # We only care about words ending in periods. + return + typ = aug_tok1.type_no_period + next_typ = aug_tok2.type_no_sentperiod + tok_is_initial = aug_tok1.is_initial + + # [4.1.2. Collocation Heuristic] If there's a + # collocation between the word before and after the + # period, then label tok as an abbreviation and NOT + # a sentence break. Note that collocations with + # frequent sentence starters as their second word are + # excluded in training. + if (typ, next_typ) in self._params.collocations: + aug_tok1.sentbreak = False + aug_tok1.abbr = True + return REASON_KNOWN_COLLOCATION + + # [4.2. Token-Based Reclassification of Abbreviations] If + # the token is an abbreviation or an ellipsis, then decide + # whether we should *also* classify it as a sentbreak. + if (aug_tok1.abbr or aug_tok1.ellipsis) and (not tok_is_initial): + # [4.1.1. Orthographic Heuristic] Check if there's + # orthogrpahic evidence about whether the next word + # starts a sentence or not. + is_sent_starter = self._ortho_heuristic(aug_tok2) + if is_sent_starter == True: + aug_tok1.sentbreak = True + return REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC + + # [4.1.3. Frequent Sentence Starter Heruistic] If the + # next word is capitalized, and is a member of the + # frequent-sentence-starters list, then label tok as a + # sentence break. + if aug_tok2.first_upper and next_typ in self._params.sent_starters: + aug_tok1.sentbreak = True + return REASON_ABBR_WITH_SENTENCE_STARTER + + # [4.3. Token-Based Detection of Initials and Ordinals] + # Check if any initials or ordinals tokens that are marked + # as sentbreaks should be reclassified as abbreviations. + if tok_is_initial or typ == "##number##": + + # [4.1.1. Orthographic Heuristic] Check if there's + # orthogrpahic evidence about whether the next word + # starts a sentence or not. 
+ is_sent_starter = self._ortho_heuristic(aug_tok2) + + if is_sent_starter == False: + aug_tok1.sentbreak = False + aug_tok1.abbr = True + if tok_is_initial: + return REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC + return REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC + + # Special heuristic for initials: if orthogrpahic + # heuristic is unknown, and next word is always + # capitalized, then mark as abbrev (eg: J. Bach). + if ( + is_sent_starter == "unknown" + and tok_is_initial + and aug_tok2.first_upper + and not (self._params.ortho_context[next_typ] & _ORTHO_LC) + ): + aug_tok1.sentbreak = False + aug_tok1.abbr = True + return REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC + + return + + def _ortho_heuristic(self, aug_tok: PunktToken) -> Union[bool, str]: + """ + Decide whether the given token is the first token in a sentence. + """ + # Sentences don't start with punctuation marks: + if aug_tok.tok in self.PUNCTUATION: + return False + + ortho_context = self._params.ortho_context[aug_tok.type_no_sentperiod] + + # If the word is capitalized, occurs at least once with a + # lower case first letter, and never occurs with an upper case + # first letter sentence-internally, then it's a sentence starter. + if ( + aug_tok.first_upper + and (ortho_context & _ORTHO_LC) + and not (ortho_context & _ORTHO_MID_UC) + ): + return True + + # If the word is lower case, and either (a) we've seen it used + # with upper case, or (b) we've never seen it used + # sentence-initially with lower case, then it's not a sentence + # starter. + if aug_tok.first_lower and ( + (ortho_context & _ORTHO_UC) or not (ortho_context & _ORTHO_BEG_LC) + ): + return False + + # Otherwise, we're not sure. + return "unknown" + + +DEBUG_DECISION_FMT = """Text: {text!r} (at offset {period_index}) +Sentence break? {break_decision} ({reason}) +Collocation? {collocation} +{type1!r}: + known abbreviation: {type1_in_abbrs} + is initial: {type1_is_initial} +{type2!r}: + known sentence starter: {type2_is_sent_starter} + orthographic heuristic suggests is a sentence starter? {type2_ortho_heuristic} + orthographic contexts in training: {type2_ortho_contexts} +""" + + +def format_debug_decision(d): + return DEBUG_DECISION_FMT.format(**d) + + +def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer): + """Builds a punkt model and applies it to the same text""" + cleanup = ( + lambda s: re.compile(r"(?:\r|^\s+)", re.MULTILINE).sub("", s).replace("\n", " ") + ) + trainer = train_cls() + trainer.INCLUDE_ALL_COLLOCS = True + trainer.train(text) + sbd = tok_cls(trainer.get_params()) + for sentence in sbd.sentences_from_text(text): + print(cleanup(sentence)) diff --git a/lib/python3.10/site-packages/nltk/tokenize/regexp.py b/lib/python3.10/site-packages/nltk/tokenize/regexp.py new file mode 100644 index 0000000000000000000000000000000000000000..e3875b1447ba2843b7e6f186de24b4e67baf8844 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tokenize/regexp.py @@ -0,0 +1,220 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +r""" +Regular-Expression Tokenizers + +A ``RegexpTokenizer`` splits a string into substrings using a regular expression. +For example, the following tokenizer forms tokens out of alphabetic sequences, +money expressions, and any other non-whitespace sequences: + + >>> from nltk.tokenize import RegexpTokenizer + >>> s = "Good muffins cost $3.88\nin New York. 
Please buy me\ntwo of them.\n\nThanks." + >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+') + >>> tokenizer.tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +A ``RegexpTokenizer`` can use its regexp to match delimiters instead: + + >>> tokenizer = RegexpTokenizer(r'\s+', gaps=True) + >>> tokenizer.tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + +Note that empty tokens are not returned when the delimiter appears at +the start or end of the string. + +The material between the tokens is discarded. For example, +the following tokenizer selects just the capitalized words: + + >>> capword_tokenizer = RegexpTokenizer(r'[A-Z]\w+') + >>> capword_tokenizer.tokenize(s) + ['Good', 'New', 'York', 'Please', 'Thanks'] + +This module contains several subclasses of ``RegexpTokenizer`` +that use pre-defined regular expressions. + + >>> from nltk.tokenize import BlanklineTokenizer + >>> # Uses '\s*\n\s*\n\s*': + >>> BlanklineTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.', + 'Thanks.'] + +All of the regular expression tokenizers are also available as functions: + + >>> from nltk.tokenize import regexp_tokenize, wordpunct_tokenize, blankline_tokenize + >>> regexp_tokenize(s, pattern=r'\w+|\$[\d\.]+|\S+') # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> wordpunct_tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', + '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> blankline_tokenize(s) + ['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.', 'Thanks.'] + +Caution: The function ``regexp_tokenize()`` takes the text as its +first argument, and the regular expression pattern as its second +argument. This differs from the conventions used by Python's +``re`` functions, where the pattern is always the first argument. +(This is for consistency with the other NLTK tokenizers.) +""" + +import re + +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.util import regexp_span_tokenize + + +class RegexpTokenizer(TokenizerI): + r""" + A tokenizer that splits a string using a regular expression, which + matches either the tokens or the separators between tokens. + + >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+') + + :type pattern: str + :param pattern: The pattern used to build this tokenizer. + (This pattern must not contain capturing parentheses; + Use non-capturing parentheses, e.g. (?:...), instead) + :type gaps: bool + :param gaps: True if this tokenizer's pattern should be used + to find separators between tokens; False if this + tokenizer's pattern should be used to find the tokens + themselves. + :type discard_empty: bool + :param discard_empty: True if any empty tokens `''` + generated by the tokenizer should be discarded. Empty + tokens can only be generated if `_gaps == True`. + :type flags: int + :param flags: The regexp flags used to compile this + tokenizer's pattern. By default, the following flags are + used: `re.UNICODE | re.MULTILINE | re.DOTALL`. 
+ + """ + + def __init__( + self, + pattern, + gaps=False, + discard_empty=True, + flags=re.UNICODE | re.MULTILINE | re.DOTALL, + ): + # If they gave us a regexp object, extract the pattern. + pattern = getattr(pattern, "pattern", pattern) + + self._pattern = pattern + self._gaps = gaps + self._discard_empty = discard_empty + self._flags = flags + self._regexp = None + + def _check_regexp(self): + if self._regexp is None: + self._regexp = re.compile(self._pattern, self._flags) + + def tokenize(self, text): + self._check_regexp() + # If our regexp matches gaps, use re.split: + if self._gaps: + if self._discard_empty: + return [tok for tok in self._regexp.split(text) if tok] + else: + return self._regexp.split(text) + + # If our regexp matches tokens, use re.findall: + else: + return self._regexp.findall(text) + + def span_tokenize(self, text): + self._check_regexp() + + if self._gaps: + for left, right in regexp_span_tokenize(text, self._regexp): + if not (self._discard_empty and left == right): + yield left, right + else: + for m in re.finditer(self._regexp, text): + yield m.span() + + def __repr__(self): + return "{}(pattern={!r}, gaps={!r}, discard_empty={!r}, flags={!r})".format( + self.__class__.__name__, + self._pattern, + self._gaps, + self._discard_empty, + self._flags, + ) + + +class WhitespaceTokenizer(RegexpTokenizer): + r""" + Tokenize a string on whitespace (space, tab, newline). + In general, users should use the string ``split()`` method instead. + + >>> from nltk.tokenize import WhitespaceTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> WhitespaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\s+", gaps=True) + + +class BlanklineTokenizer(RegexpTokenizer): + """ + Tokenize a string, treating any sequence of blank lines as a delimiter. + Blank lines are defined as lines containing no characters, except for + space or tab characters. + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\s*\n\s*\n\s*", gaps=True) + + +class WordPunctTokenizer(RegexpTokenizer): + r""" + Tokenize a text into a sequence of alphabetic and + non-alphabetic characters, using the regexp ``\w+|[^\w\s]+``. + + >>> from nltk.tokenize import WordPunctTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> WordPunctTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', + '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\w+|[^\w\s]+") + + +###################################################################### +# { Tokenization Functions +###################################################################### + + +def regexp_tokenize( + text, + pattern, + gaps=False, + discard_empty=True, + flags=re.UNICODE | re.MULTILINE | re.DOTALL, +): + """ + Return a tokenized copy of *text*. See :class:`.RegexpTokenizer` + for descriptions of the arguments. 
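+
+    For example:
+
+    >>> regexp_tokenize("Good muffins cost $3.88.", pattern=r'[a-zA-Z]+')
+    ['Good', 'muffins', 'cost']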
+ """ + tokenizer = RegexpTokenizer(pattern, gaps, discard_empty, flags) + return tokenizer.tokenize(text) + + +blankline_tokenize = BlanklineTokenizer().tokenize +wordpunct_tokenize = WordPunctTokenizer().tokenize diff --git a/lib/python3.10/site-packages/nltk/tokenize/sexpr.py b/lib/python3.10/site-packages/nltk/tokenize/sexpr.py new file mode 100644 index 0000000000000000000000000000000000000000..0776642fbd2759c3f37352a97b18d915198cc20c --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tokenize/sexpr.py @@ -0,0 +1,140 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Yoav Goldberg +# Steven Bird (minor edits) +# URL: +# For license information, see LICENSE.TXT + +""" +S-Expression Tokenizer + +``SExprTokenizer`` is used to find parenthesized expressions in a +string. In particular, it divides a string into a sequence of +substrings that are either parenthesized expressions (including any +nested parenthesized expressions), or other whitespace-separated +tokens. + + >>> from nltk.tokenize import SExprTokenizer + >>> SExprTokenizer().tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + +By default, `SExprTokenizer` will raise a ``ValueError`` exception if +used to tokenize an expression with non-matching parentheses: + + >>> SExprTokenizer().tokenize('c) d) e (f (g') + Traceback (most recent call last): + ... + ValueError: Un-matched close paren at char 1 + +The ``strict`` argument can be set to False to allow for +non-matching parentheses. Any unmatched close parentheses will be +listed as their own s-expression; and the last partial sexpr with +unmatched open parentheses will be listed as its own sexpr: + + >>> SExprTokenizer(strict=False).tokenize('c) d) e (f (g') + ['c', ')', 'd', ')', 'e', '(f (g'] + +The characters used for open and close parentheses may be customized +using the ``parens`` argument to the `SExprTokenizer` constructor: + + >>> SExprTokenizer(parens='{}').tokenize('{a b {c d}} e f {g}') + ['{a b {c d}}', 'e', 'f', '{g}'] + +The s-expression tokenizer is also available as a function: + + >>> from nltk.tokenize import sexpr_tokenize + >>> sexpr_tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + +""" + +import re + +from nltk.tokenize.api import TokenizerI + + +class SExprTokenizer(TokenizerI): + """ + A tokenizer that divides strings into s-expressions. + An s-expresion can be either: + + - a parenthesized expression, including any nested parenthesized + expressions, or + - a sequence of non-whitespace non-parenthesis characters. + + For example, the string ``(a (b c)) d e (f)`` consists of four + s-expressions: ``(a (b c))``, ``d``, ``e``, and ``(f)``. + + By default, the characters ``(`` and ``)`` are treated as open and + close parentheses, but alternative strings may be specified. + + :param parens: A two-element sequence specifying the open and close parentheses + that should be used to find sexprs. This will typically be either a + two-character string, or a list of two strings. + :type parens: str or list + :param strict: If true, then raise an exception when tokenizing an ill-formed sexpr. 
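+
+    For example, with square brackets as the parentheses:
+
+    >>> SExprTokenizer(parens='[]').tokenize('a [b [c]] [d]')
+    ['a', '[b [c]]', '[d]']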
+ """ + + def __init__(self, parens="()", strict=True): + if len(parens) != 2: + raise ValueError("parens must contain exactly two strings") + self._strict = strict + self._open_paren = parens[0] + self._close_paren = parens[1] + self._paren_regexp = re.compile( + f"{re.escape(parens[0])}|{re.escape(parens[1])}" + ) + + def tokenize(self, text): + """ + Return a list of s-expressions extracted from *text*. + For example: + + >>> SExprTokenizer().tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + + All parentheses are assumed to mark s-expressions. + (No special processing is done to exclude parentheses that occur + inside strings, or following backslash characters.) + + If the given expression contains non-matching parentheses, + then the behavior of the tokenizer depends on the ``strict`` + parameter to the constructor. If ``strict`` is ``True``, then + raise a ``ValueError``. If ``strict`` is ``False``, then any + unmatched close parentheses will be listed as their own + s-expression; and the last partial s-expression with unmatched open + parentheses will be listed as its own s-expression: + + >>> SExprTokenizer(strict=False).tokenize('c) d) e (f (g') + ['c', ')', 'd', ')', 'e', '(f (g'] + + :param text: the string to be tokenized + :type text: str or iter(str) + :rtype: iter(str) + """ + result = [] + pos = 0 + depth = 0 + for m in self._paren_regexp.finditer(text): + paren = m.group() + if depth == 0: + result += text[pos : m.start()].split() + pos = m.start() + if paren == self._open_paren: + depth += 1 + if paren == self._close_paren: + if self._strict and depth == 0: + raise ValueError("Un-matched close paren at char %d" % m.start()) + depth = max(0, depth - 1) + if depth == 0: + result.append(text[pos : m.end()]) + pos = m.end() + if self._strict and depth > 0: + raise ValueError("Un-matched open paren at char %d" % pos) + if pos < len(text): + result.append(text[pos:]) + return result + + +sexpr_tokenize = SExprTokenizer().tokenize diff --git a/lib/python3.10/site-packages/nltk/tokenize/simple.py b/lib/python3.10/site-packages/nltk/tokenize/simple.py new file mode 100644 index 0000000000000000000000000000000000000000..f87b60a274c8121303ff60f203e1f3b991da1547 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tokenize/simple.py @@ -0,0 +1,137 @@ +# Natural Language Toolkit: Simple Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +r""" +Simple Tokenizers + +These tokenizers divide strings into substrings using the string +``split()`` method. +When tokenizing using a particular delimiter string, use +the string ``split()`` method directly, as this is more efficient. + +The simple tokenizers are *not* available as separate functions; +instead, you should just use the string ``split()`` method directly: + + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> s.split() # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + >>> s.split(' ') # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88\nin', 'New', 'York.', '', + 'Please', 'buy', 'me\ntwo', 'of', 'them.\n\nThanks.'] + >>> s.split('\n') # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88', 'in New York. 
Please buy me', + 'two of them.', '', 'Thanks.'] + +The simple tokenizers are mainly useful because they follow the +standard ``TokenizerI`` interface, and so can be used with any code +that expects a tokenizer. For example, these tokenizers can be used +to specify the tokenization conventions when building a `CorpusReader`. + +""" + +from nltk.tokenize.api import StringTokenizer, TokenizerI +from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize + + +class SpaceTokenizer(StringTokenizer): + r"""Tokenize a string using the space character as a delimiter, + which is the same as ``s.split(' ')``. + + >>> from nltk.tokenize import SpaceTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> SpaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88\nin', 'New', 'York.', '', + 'Please', 'buy', 'me\ntwo', 'of', 'them.\n\nThanks.'] + """ + + _string = " " + + +class TabTokenizer(StringTokenizer): + r"""Tokenize a string use the tab character as a delimiter, + the same as ``s.split('\t')``. + + >>> from nltk.tokenize import TabTokenizer + >>> TabTokenizer().tokenize('a\tb c\n\t d') + ['a', 'b c\n', ' d'] + """ + + _string = "\t" + + +class CharTokenizer(StringTokenizer): + """Tokenize a string into individual characters. If this functionality + is ever required directly, use ``for char in string``. + """ + + def tokenize(self, s): + return list(s) + + def span_tokenize(self, s): + yield from enumerate(range(1, len(s) + 1)) + + +class LineTokenizer(TokenizerI): + r"""Tokenize a string into its lines, optionally discarding blank lines. + This is similar to ``s.split('\n')``. + + >>> from nltk.tokenize import LineTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88', 'in New York. Please buy me', + 'two of them.', '', 'Thanks.'] + >>> # same as [l for l in s.split('\n') if l.strip()]: + >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88', 'in New York. Please buy me', + 'two of them.', 'Thanks.'] + + :param blanklines: Indicates how blank lines should be handled. Valid values are: + + - ``discard``: strip blank lines out of the token list before returning it. + A line is considered blank if it contains only whitespace characters. + - ``keep``: leave all blank lines in the token list. + - ``discard-eof``: if the string ends with a newline, then do not generate + a corresponding token ``''`` after that newline. + """ + + def __init__(self, blanklines="discard"): + valid_blanklines = ("discard", "keep", "discard-eof") + if blanklines not in valid_blanklines: + raise ValueError( + "Blank lines must be one of: %s" % " ".join(valid_blanklines) + ) + + self._blanklines = blanklines + + def tokenize(self, s): + lines = s.splitlines() + # If requested, strip off blank lines. 
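+        # "discard" drops lines that are empty or contain only whitespace,
+        # "discard-eof" drops only a blank final line, and "keep" returns the
+        # result of splitlines() unchanged.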
+ if self._blanklines == "discard": + lines = [l for l in lines if l.rstrip()] + elif self._blanklines == "discard-eof": + if lines and not lines[-1].strip(): + lines.pop() + return lines + + # discard-eof not implemented + def span_tokenize(self, s): + if self._blanklines == "keep": + yield from string_span_tokenize(s, r"\n") + else: + yield from regexp_span_tokenize(s, r"\n(\s+\n)*") + + +###################################################################### +# { Tokenization Functions +###################################################################### +# XXX: it is stated in module docs that there is no function versions + + +def line_tokenize(text, blanklines="discard"): + return LineTokenizer(blanklines).tokenize(text) diff --git a/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py b/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py new file mode 100644 index 0000000000000000000000000000000000000000..24e43caae2dae6e3c76e66704fa9b856a6dc348c --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py @@ -0,0 +1,194 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Hench +# Alex Estes +# URL: +# For license information, see LICENSE.TXT + +""" +The Sonority Sequencing Principle (SSP) is a language agnostic algorithm proposed +by Otto Jesperson in 1904. The sonorous quality of a phoneme is judged by the +openness of the lips. Syllable breaks occur before troughs in sonority. For more +on the SSP see Selkirk (1984). + +The default implementation uses the English alphabet, but the `sonority_hiearchy` +can be modified to IPA or any other alphabet for the use-case. The SSP is a +universal syllabification algorithm, but that does not mean it performs equally +across languages. Bartlett et al. (2009) is a good benchmark for English accuracy +if utilizing IPA (pg. 311). + +Importantly, if a custom hierarchy is supplied and vowels span across more than +one level, they should be given separately to the `vowels` class attribute. + +References: + +- Otto Jespersen. 1904. Lehrbuch der Phonetik. + Leipzig, Teubner. Chapter 13, Silbe, pp. 185-203. +- Elisabeth Selkirk. 1984. On the major class features and syllable theory. + In Aronoff & Oehrle (eds.) Language Sound Structure: Studies in Phonology. + Cambridge, MIT Press. pp. 107-136. +- Susan Bartlett, et al. 2009. On the Syllabification of Phonemes. + In HLT-NAACL. pp. 308-316. +""" + +import re +import warnings +from string import punctuation + +from nltk.tokenize.api import TokenizerI +from nltk.util import ngrams + + +class SyllableTokenizer(TokenizerI): + """ + Syllabifies words based on the Sonority Sequencing Principle (SSP). + + >>> from nltk.tokenize import SyllableTokenizer + >>> from nltk import word_tokenize + >>> SSP = SyllableTokenizer() + >>> SSP.tokenize('justification') + ['jus', 'ti', 'fi', 'ca', 'tion'] + >>> text = "This is a foobar-like sentence." + >>> [SSP.tokenize(token) for token in word_tokenize(text)] + [['This'], ['is'], ['a'], ['foo', 'bar', '-', 'li', 'ke'], ['sen', 'ten', 'ce'], ['.']] + """ + + def __init__(self, lang="en", sonority_hierarchy=False): + """ + :param lang: Language parameter, default is English, 'en' + :type lang: str + :param sonority_hierarchy: Sonority hierarchy according to the + Sonority Sequencing Principle. + :type sonority_hierarchy: list(str) + """ + # Sonority hierarchy should be provided in descending order. 
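+        # (Most sonorous class first; in the default English hierarchy below
+        # that means vowels, then nasals, then fricatives, then stops.)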
+ # If vowels are spread across multiple levels, they should be + # passed assigned self.vowels var together, otherwise should be + # placed in first index of hierarchy. + if not sonority_hierarchy and lang == "en": + sonority_hierarchy = [ + "aeiouy", # vowels. + "lmnrw", # nasals. + "zvsf", # fricatives. + "bcdgtkpqxhj", # stops. + ] + + self.vowels = sonority_hierarchy[0] + self.phoneme_map = {} + for i, level in enumerate(sonority_hierarchy): + for c in level: + sonority_level = len(sonority_hierarchy) - i + self.phoneme_map[c] = sonority_level + self.phoneme_map[c.upper()] = sonority_level + + def assign_values(self, token): + """ + Assigns each phoneme its value from the sonority hierarchy. + Note: Sentence/text has to be tokenized first. + + :param token: Single word or token + :type token: str + :return: List of tuples, first element is character/phoneme and + second is the soronity value. + :rtype: list(tuple(str, int)) + """ + syllables_values = [] + for c in token: + try: + syllables_values.append((c, self.phoneme_map[c])) + except KeyError: + if c not in "0123456789" and c not in punctuation: + warnings.warn( + "Character not defined in sonority_hierarchy," + " assigning as vowel: '{}'".format(c) + ) + syllables_values.append((c, max(self.phoneme_map.values()))) + if c not in self.vowels: + self.vowels += c + else: # If it's a punctuation or numbers, assign -1. + syllables_values.append((c, -1)) + return syllables_values + + def validate_syllables(self, syllable_list): + """ + Ensures each syllable has at least one vowel. + If the following syllable doesn't have vowel, add it to the current one. + + :param syllable_list: Single word or token broken up into syllables. + :type syllable_list: list(str) + :return: Single word or token broken up into syllables + (with added syllables if necessary) + :rtype: list(str) + """ + valid_syllables = [] + front = "" + vowel_pattern = re.compile("|".join(self.vowels)) + for i, syllable in enumerate(syllable_list): + if syllable in punctuation: + valid_syllables.append(syllable) + continue + if not vowel_pattern.search(syllable): + if len(valid_syllables) == 0: + front += syllable + else: + valid_syllables = valid_syllables[:-1] + [ + valid_syllables[-1] + syllable + ] + else: + if len(valid_syllables) == 0: + valid_syllables.append(front + syllable) + else: + valid_syllables.append(syllable) + + return valid_syllables + + def tokenize(self, token): + """ + Apply the SSP to return a list of syllables. + Note: Sentence/text has to be tokenized first. + + :param token: Single word or token + :type token: str + :return syllable_list: Single word or token broken up into syllables. + :rtype: list(str) + """ + # assign values from hierarchy + syllables_values = self.assign_values(token) + + # if only one vowel return word + if sum(token.count(x) for x in self.vowels) <= 1: + return [token] + + syllable_list = [] + syllable = syllables_values[0][0] # start syllable with first phoneme + for trigram in ngrams(syllables_values, n=3): + phonemes, values = zip(*trigram) + # Sonority of previous, focal and following phoneme + prev_value, focal_value, next_value = values + # Focal phoneme. + focal_phoneme = phonemes[1] + + # These cases trigger syllable break. + if focal_value == -1: # If it's a punctuation, just break. 
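+                    # Emit the current syllable, then the punctuation/digit
+                    # character as its own token (e.g. the "-" in "foobar-like"
+                    # in the doctest above).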
+ syllable_list.append(syllable) + syllable_list.append(focal_phoneme) + syllable = "" + elif prev_value >= focal_value == next_value: + syllable += focal_phoneme + syllable_list.append(syllable) + syllable = "" + + elif prev_value > focal_value < next_value: + syllable_list.append(syllable) + syllable = "" + syllable += focal_phoneme + + # no syllable break + else: + syllable += focal_phoneme + + syllable += syllables_values[-1][0] # append last phoneme + syllable_list.append(syllable) + + return self.validate_syllables(syllable_list) diff --git a/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py b/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py new file mode 100644 index 0000000000000000000000000000000000000000..ff3f16621e3a3c38ee0265e817b04c655856dd70 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py @@ -0,0 +1,292 @@ +#!/usr/bin/env python +# Natural Language Toolkit: Interface to the Stanford Segmenter +# for Chinese and Arabic +# +# Copyright (C) 2001-2023 NLTK Project +# Author: 52nlp <52nlpcn@gmail.com> +# Casper Lehmann-Strøm +# Alex Constantin +# +# URL: +# For license information, see LICENSE.TXT + +import json +import os +import tempfile +import warnings +from subprocess import PIPE + +from nltk.internals import ( + _java_options, + config_java, + find_dir, + find_file, + find_jar, + java, +) +from nltk.tokenize.api import TokenizerI + +_stanford_url = "https://nlp.stanford.edu/software" + + +class StanfordSegmenter(TokenizerI): + """Interface to the Stanford Segmenter + + If stanford-segmenter version is older than 2016-10-31, then path_to_slf4j + should be provieded, for example:: + + seg = StanfordSegmenter(path_to_slf4j='/YOUR_PATH/slf4j-api.jar') + + >>> from nltk.tokenize.stanford_segmenter import StanfordSegmenter + >>> seg = StanfordSegmenter() # doctest: +SKIP + >>> seg.default_config('zh') # doctest: +SKIP + >>> sent = u'这是斯坦福中文分词器测试' + >>> print(seg.segment(sent)) # doctest: +SKIP + \u8fd9 \u662f \u65af\u5766\u798f \u4e2d\u6587 \u5206\u8bcd\u5668 \u6d4b\u8bd5 + + >>> seg.default_config('ar') # doctest: +SKIP + >>> sent = u'هذا هو تصنيف ستانفورد العربي للكلمات' + >>> print(seg.segment(sent.split())) # doctest: +SKIP + \u0647\u0630\u0627 \u0647\u0648 \u062a\u0635\u0646\u064a\u0641 \u0633\u062a\u0627\u0646\u0641\u0648\u0631\u062f \u0627\u0644\u0639\u0631\u0628\u064a \u0644 \u0627\u0644\u0643\u0644\u0645\u0627\u062a + + """ + + _JAR = "stanford-segmenter.jar" + + def __init__( + self, + path_to_jar=None, + path_to_slf4j=None, + java_class=None, + path_to_model=None, + path_to_dict=None, + path_to_sihan_corpora_dict=None, + sihan_post_processing="false", + keep_whitespaces="false", + encoding="UTF-8", + options=None, + verbose=False, + java_options="-mx2g", + ): + # Raise deprecation warning. 
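+        # The warnings filter is flipped to "always" so the message is shown
+        # even where DeprecationWarning is ignored by default, then set back
+        # to "ignore" just below.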
+ warnings.simplefilter("always", DeprecationWarning) + warnings.warn( + str( + "\nThe StanfordTokenizer will " + "be deprecated in version 3.2.5.\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPTokenizer\033[0m instead.'" + ), + DeprecationWarning, + stacklevel=2, + ) + warnings.simplefilter("ignore", DeprecationWarning) + + stanford_segmenter = find_jar( + self._JAR, + path_to_jar, + env_vars=("STANFORD_SEGMENTER",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + if path_to_slf4j is not None: + slf4j = find_jar( + "slf4j-api.jar", + path_to_slf4j, + env_vars=("SLF4J", "STANFORD_SEGMENTER"), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + else: + slf4j = None + + # This is passed to java as the -cp option, the old version of segmenter needs slf4j. + # The new version of stanford-segmenter-2016-10-31 doesn't need slf4j + self._stanford_jar = os.pathsep.join( + _ for _ in [stanford_segmenter, slf4j] if _ is not None + ) + + self._java_class = java_class + self._model = path_to_model + self._sihan_corpora_dict = path_to_sihan_corpora_dict + self._sihan_post_processing = sihan_post_processing + self._keep_whitespaces = keep_whitespaces + self._dict = path_to_dict + + self._encoding = encoding + self.java_options = java_options + options = {} if options is None else options + self._options_cmd = ",".join( + f"{key}={json.dumps(val)}" for key, val in options.items() + ) + + def default_config(self, lang): + """ + Attempt to initialize Stanford Word Segmenter for the specified language + using the STANFORD_SEGMENTER and STANFORD_MODELS environment variables + """ + + search_path = () + if os.environ.get("STANFORD_SEGMENTER"): + search_path = {os.path.join(os.environ.get("STANFORD_SEGMENTER"), "data")} + + # init for Chinese-specific files + self._dict = None + self._sihan_corpora_dict = None + self._sihan_post_processing = "false" + + if lang == "ar": + self._java_class = ( + "edu.stanford.nlp.international.arabic.process.ArabicSegmenter" + ) + model = "arabic-segmenter-atb+bn+arztrain.ser.gz" + + elif lang == "zh": + self._java_class = "edu.stanford.nlp.ie.crf.CRFClassifier" + model = "pku.gz" + self._sihan_post_processing = "true" + + path_to_dict = "dict-chris6.ser.gz" + try: + self._dict = find_file( + path_to_dict, + searchpath=search_path, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_MODELS",), + ) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using env. " + "variables STANFORD_MODELS and /data/)" + % path_to_dict + ) from e + + sihan_dir = "./data/" + try: + path_to_sihan_dir = find_dir( + sihan_dir, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_SEGMENTER",), + ) + self._sihan_corpora_dict = os.path.join(path_to_sihan_dir, sihan_dir) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using the " + "STANFORD_SEGMENTER environment variable)" % sihan_dir + ) from e + else: + raise LookupError(f"Unsupported language {lang}") + + try: + self._model = find_file( + model, + searchpath=search_path, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_MODELS", "STANFORD_SEGMENTER"), + ) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using env. 
" + "variables STANFORD_MODELS and /data/)" % model + ) from e + + def tokenize(self, s): + super().tokenize(s) + + def segment_file(self, input_file_path): + """ """ + cmd = [ + self._java_class, + "-loadClassifier", + self._model, + "-keepAllWhitespaces", + self._keep_whitespaces, + "-textFile", + input_file_path, + ] + if self._sihan_corpora_dict is not None: + cmd.extend( + [ + "-serDictionary", + self._dict, + "-sighanCorporaDict", + self._sihan_corpora_dict, + "-sighanPostProcessing", + self._sihan_post_processing, + ] + ) + + stdout = self._execute(cmd) + + return stdout + + def segment(self, tokens): + return self.segment_sents([tokens]) + + def segment_sents(self, sentences): + """ """ + encoding = self._encoding + # Create a temporary input file + _input_fh, self._input_file_path = tempfile.mkstemp(text=True) + + # Write the actural sentences to the temporary input file + _input_fh = os.fdopen(_input_fh, "wb") + _input = "\n".join(" ".join(x) for x in sentences) + if isinstance(_input, str) and encoding: + _input = _input.encode(encoding) + _input_fh.write(_input) + _input_fh.close() + + cmd = [ + self._java_class, + "-loadClassifier", + self._model, + "-keepAllWhitespaces", + self._keep_whitespaces, + "-textFile", + self._input_file_path, + ] + if self._sihan_corpora_dict is not None: + cmd.extend( + [ + "-serDictionary", + self._dict, + "-sighanCorporaDict", + self._sihan_corpora_dict, + "-sighanPostProcessing", + self._sihan_post_processing, + ] + ) + + stdout = self._execute(cmd) + + # Delete the temporary file + os.unlink(self._input_file_path) + + return stdout + + def _execute(self, cmd, verbose=False): + encoding = self._encoding + cmd.extend(["-inputEncoding", encoding]) + _options_cmd = self._options_cmd + if _options_cmd: + cmd.extend(["-options", self._options_cmd]) + + default_options = " ".join(_java_options) + + # Configure java. + config_java(options=self.java_options, verbose=verbose) + + stdout, _stderr = java( + cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE + ) + stdout = stdout.decode(encoding) + + # Return java configurations to their default values. + config_java(options=default_options, verbose=False) + + return stdout diff --git a/lib/python3.10/site-packages/nltk/tokenize/treebank.py b/lib/python3.10/site-packages/nltk/tokenize/treebank.py new file mode 100644 index 0000000000000000000000000000000000000000..e107f3838d965fd50270082efd4fe804ffcbe08d --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tokenize/treebank.py @@ -0,0 +1,402 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Michael Heilman (re-port from http://www.cis.upenn.edu/~treebank/tokenizer.sed) +# Tom Aarsen <> (modifications) +# +# URL: +# For license information, see LICENSE.TXT + +r""" + +Penn Treebank Tokenizer + +The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. +This implementation is a port of the tokenizer sed script written by Robert McIntyre +and available at http://www.cis.upenn.edu/~treebank/tokenizer.sed. +""" + +import re +import warnings +from typing import Iterator, List, Tuple + +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.destructive import MacIntyreContractions +from nltk.tokenize.util import align_tokens + + +class TreebankWordTokenizer(TokenizerI): + r""" + The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. + + This tokenizer performs the following steps: + + - split standard contractions, e.g. 
``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` + - treat most punctuation characters as separate tokens + - split off commas and single quotes, when followed by whitespace + - separate periods that appear at the end of line + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> TreebankWordTokenizer().tokenize(s) + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks', '.'] + >>> s = "They'll save and invest more." + >>> TreebankWordTokenizer().tokenize(s) + ['They', "'ll", 'save', 'and', 'invest', 'more', '.'] + >>> s = "hi, my name can't hello," + >>> TreebankWordTokenizer().tokenize(s) + ['hi', ',', 'my', 'name', 'ca', "n't", 'hello', ','] + """ + + # starting quotes + STARTING_QUOTES = [ + (re.compile(r"^\""), r"``"), + (re.compile(r"(``)"), r" \1 "), + (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "), + ] + + # punctuation + PUNCTUATION = [ + (re.compile(r"([:,])([^\d])"), r" \1 \2"), + (re.compile(r"([:,])$"), r" \1 "), + (re.compile(r"\.\.\."), r" ... "), + (re.compile(r"[;@#$%&]"), r" \g<0> "), + ( + re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'), + r"\1 \2\3 ", + ), # Handles the final period. + (re.compile(r"[?!]"), r" \g<0> "), + (re.compile(r"([^'])' "), r"\1 ' "), + ] + + # Pads parentheses + PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ") + + # Optionally: Convert parentheses, brackets and converts them to PTB symbols. + CONVERT_PARENTHESES = [ + (re.compile(r"\("), "-LRB-"), + (re.compile(r"\)"), "-RRB-"), + (re.compile(r"\["), "-LSB-"), + (re.compile(r"\]"), "-RSB-"), + (re.compile(r"\{"), "-LCB-"), + (re.compile(r"\}"), "-RCB-"), + ] + + DOUBLE_DASHES = (re.compile(r"--"), r" -- ") + + # ending quotes + ENDING_QUOTES = [ + (re.compile(r"''"), " '' "), + (re.compile(r'"'), " '' "), + (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "), + (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "), + ] + + # List of contractions adapted from Robert MacIntyre's tokenizer. + _contractions = MacIntyreContractions() + CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2)) + CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3)) + + def tokenize( + self, text: str, convert_parentheses: bool = False, return_str: bool = False + ) -> List[str]: + r"""Return a tokenized copy of `text`. + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88 (roughly 3,36 euros)\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> TreebankWordTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '(', 'roughly', '3,36', + 'euros', ')', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + >>> TreebankWordTokenizer().tokenize(s, convert_parentheses=True) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '-LRB-', 'roughly', '3,36', + 'euros', '-RRB-', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + + :param text: A string with a sentence or sentences. + :type text: str + :param convert_parentheses: if True, replace parentheses to PTB symbols, + e.g. `(` to `-LRB-`. Defaults to False. + :type convert_parentheses: bool, optional + :param return_str: If True, return tokens as space-separated string, + defaults to False. + :type return_str: bool, optional + :return: List of tokens from `text`. 
+ :rtype: List[str] + """ + if return_str is not False: + warnings.warn( + "Parameter 'return_str' has been deprecated and should no " + "longer be used.", + category=DeprecationWarning, + stacklevel=2, + ) + + for regexp, substitution in self.STARTING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp, substitution in self.PUNCTUATION: + text = regexp.sub(substitution, text) + + # Handles parentheses. + regexp, substitution = self.PARENS_BRACKETS + text = regexp.sub(substitution, text) + # Optionally convert parentheses + if convert_parentheses: + for regexp, substitution in self.CONVERT_PARENTHESES: + text = regexp.sub(substitution, text) + + # Handles double dash. + regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + # add extra space to make things easier + text = " " + text + " " + + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r" \1 \2 ", text) + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r" \1 \2 ", text) + + # We are not using CONTRACTIONS4 since + # they are also commented out in the SED scripts + # for regexp in self._contractions.CONTRACTIONS4: + # text = regexp.sub(r' \1 \2 \3 ', text) + + return text.split() + + def span_tokenize(self, text: str) -> Iterator[Tuple[int, int]]: + r""" + Returns the spans of the tokens in ``text``. + Uses the post-hoc nltk.tokens.align_tokens to return the offset spans. + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), + ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), + ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), + ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)] + >>> list(TreebankWordTokenizer().span_tokenize(s)) == expected + True + >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')', + ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.'] + >>> [s[start:end] for start, end in TreebankWordTokenizer().span_tokenize(s)] == expected + True + + :param text: A string with a sentence or sentences. + :type text: str + :yield: Tuple[int, int] + """ + raw_tokens = self.tokenize(text) + + # Convert converted quotes back to original double quotes + # Do this only if original text contains double quote(s) or double + # single-quotes (because '' might be transformed to `` if it is + # treated as starting quotes). + if ('"' in text) or ("''" in text): + # Find double quotes and converted quotes + matched = [m.group() for m in re.finditer(r"``|'{2}|\"", text)] + + # Replace converted quotes back to double quotes + tokens = [ + matched.pop(0) if tok in ['"', "``", "''"] else tok + for tok in raw_tokens + ] + else: + tokens = raw_tokens + + yield from align_tokens(tokens, text) + + +class TreebankWordDetokenizer(TokenizerI): + r""" + The Treebank detokenizer uses the reverse regex operations corresponding to + the Treebank tokenizer's regexes. + + Note: + + - There're additional assumption mades when undoing the padding of ``[;@#$%&]`` + punctuation symbols that isn't presupposed in the TreebankTokenizer. 
+ - There're additional regexes added in reversing the parentheses tokenization, + such as the ``r'([\]\)\}\>])\s([:;,.])'``, which removes the additional right + padding added to the closing parentheses precedding ``[:;,.]``. + - It's not possible to return the original whitespaces as they were because + there wasn't explicit records of where `'\n'`, `'\t'` or `'\s'` were removed at + the text.split() operation. + + >>> from nltk.tokenize.treebank import TreebankWordTokenizer, TreebankWordDetokenizer + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> d = TreebankWordDetokenizer() + >>> t = TreebankWordTokenizer() + >>> toks = t.tokenize(s) + >>> d.detokenize(toks) + 'Good muffins cost $3.88 in New York. Please buy me two of them. Thanks.' + + The MXPOST parentheses substitution can be undone using the ``convert_parentheses`` + parameter: + + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected_tokens = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '-LRB-', 'York', '-RRB-', '.', 'Please', '-LRB-', 'buy', + ... '-RRB-', 'me', 'two', 'of', 'them.', '-LRB-', 'Thanks', '-RRB-', '.'] + >>> expected_tokens == t.tokenize(s, convert_parentheses=True) + True + >>> expected_detoken = 'Good muffins cost $3.88 in New (York). Please (buy) me two of them. (Thanks).' + >>> expected_detoken == d.detokenize(t.tokenize(s, convert_parentheses=True), convert_parentheses=True) + True + + During tokenization it's safe to add more spaces but during detokenization, + simply undoing the padding doesn't really help. + + - During tokenization, left and right pad is added to ``[!?]``, when + detokenizing, only left shift the ``[!?]`` is needed. + Thus ``(re.compile(r'\s([?!])'), r'\g<1>')``. + + - During tokenization ``[:,]`` are left and right padded but when detokenizing, + only left shift is necessary and we keep right pad after comma/colon + if the string after is a non-digit. + Thus ``(re.compile(r'\s([:,])\s([^\d])'), r'\1 \2')``. + + >>> from nltk.tokenize.treebank import TreebankWordDetokenizer + >>> toks = ['hello', ',', 'i', 'ca', "n't", 'feel', 'my', 'feet', '!', 'Help', '!', '!'] + >>> twd = TreebankWordDetokenizer() + >>> twd.detokenize(toks) + "hello, i can't feel my feet! Help!!" + + >>> toks = ['hello', ',', 'i', "can't", 'feel', ';', 'my', 'feet', '!', + ... 'Help', '!', '!', 'He', 'said', ':', 'Help', ',', 'help', '?', '!'] + >>> twd.detokenize(toks) + "hello, i can't feel; my feet! Help!! He said: Help, help?!" + """ + + _contractions = MacIntyreContractions() + CONTRACTIONS2 = [ + re.compile(pattern.replace("(?#X)", r"\s")) + for pattern in _contractions.CONTRACTIONS2 + ] + CONTRACTIONS3 = [ + re.compile(pattern.replace("(?#X)", r"\s")) + for pattern in _contractions.CONTRACTIONS3 + ] + + # ending quotes + ENDING_QUOTES = [ + (re.compile(r"([^' ])\s('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1\2 "), + (re.compile(r"([^' ])\s('[sS]|'[mM]|'[dD]|') "), r"\1\2 "), + (re.compile(r"(\S)\s(\'\')"), r"\1\2"), + ( + re.compile(r"(\'\')\s([.,:)\]>};%])"), + r"\1\2", + ), # Quotes followed by no-left-padded punctuations. + (re.compile(r"''"), '"'), + ] + + # Handles double dashes + DOUBLE_DASHES = (re.compile(r" -- "), r"--") + + # Optionally: Convert parentheses, brackets and converts them from PTB symbols. 
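+    # (Reverses TreebankWordTokenizer.CONVERT_PARENTHESES: "-LRB-" back to "(",
+    # "-RSB-" back to "]", and so on.)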
+ CONVERT_PARENTHESES = [ + (re.compile("-LRB-"), "("), + (re.compile("-RRB-"), ")"), + (re.compile("-LSB-"), "["), + (re.compile("-RSB-"), "]"), + (re.compile("-LCB-"), "{"), + (re.compile("-RCB-"), "}"), + ] + + # Undo padding on parentheses. + PARENS_BRACKETS = [ + (re.compile(r"([\[\(\{\<])\s"), r"\g<1>"), + (re.compile(r"\s([\]\)\}\>])"), r"\g<1>"), + (re.compile(r"([\]\)\}\>])\s([:;,.])"), r"\1\2"), + ] + + # punctuation + PUNCTUATION = [ + (re.compile(r"([^'])\s'\s"), r"\1' "), + (re.compile(r"\s([?!])"), r"\g<1>"), # Strip left pad for [?!] + # (re.compile(r'\s([?!])\s'), r'\g<1>'), + (re.compile(r'([^\.])\s(\.)([\]\)}>"\']*)\s*$'), r"\1\2\3"), + # When tokenizing, [;@#$%&] are padded with whitespace regardless of + # whether there are spaces before or after them. + # But during detokenization, we need to distinguish between left/right + # pad, so we split this up. + (re.compile(r"([#$])\s"), r"\g<1>"), # Left pad. + (re.compile(r"\s([;%])"), r"\g<1>"), # Right pad. + # (re.compile(r"\s([&*])\s"), r" \g<1> "), # Unknown pad. + (re.compile(r"\s\.\.\.\s"), r"..."), + # (re.compile(r"\s([:,])\s$"), r"\1"), # .strip() takes care of it. + ( + re.compile(r"\s([:,])"), + r"\1", + ), # Just remove left padding. Punctuation in numbers won't be padded. + ] + + # starting quotes + STARTING_QUOTES = [ + (re.compile(r"([ (\[{<])\s``"), r"\1``"), + (re.compile(r"(``)\s"), r"\1"), + (re.compile(r"``"), r'"'), + ] + + def tokenize(self, tokens: List[str], convert_parentheses: bool = False) -> str: + """ + Treebank detokenizer, created by undoing the regexes from + the TreebankWordTokenizer.tokenize. + + :param tokens: A list of strings, i.e. tokenized text. + :type tokens: List[str] + :param convert_parentheses: if True, replace PTB symbols with parentheses, + e.g. `-LRB-` to `(`. Defaults to False. + :type convert_parentheses: bool, optional + :return: str + """ + text = " ".join(tokens) + + # Add extra space to make things easier + text = " " + text + " " + + # Reverse the contractions regexes. + # Note: CONTRACTIONS4 are not used in tokenization. + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r"\1\2", text) + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r"\1\2", text) + + # Reverse the regexes applied for ending quotes. + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + # Undo the space padding. + text = text.strip() + + # Reverse the padding on double dashes. + regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + if convert_parentheses: + for regexp, substitution in self.CONVERT_PARENTHESES: + text = regexp.sub(substitution, text) + + # Reverse the padding regexes applied for parenthesis/brackets. + for regexp, substitution in self.PARENS_BRACKETS: + text = regexp.sub(substitution, text) + + # Reverse the regexes applied for punctuations. + for regexp, substitution in self.PUNCTUATION: + text = regexp.sub(substitution, text) + + # Reverse the regexes applied for starting quotes. 
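+        # (Drops the padding around `` and maps `` back to a plain double quote.)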
+ for regexp, substitution in self.STARTING_QUOTES: + text = regexp.sub(substitution, text) + + return text.strip() + + def detokenize(self, tokens: List[str], convert_parentheses: bool = False) -> str: + """Duck-typing the abstract *tokenize()*.""" + return self.tokenize(tokens, convert_parentheses) diff --git a/lib/python3.10/site-packages/numba-0.61.0.dist-info/LICENSE b/lib/python3.10/site-packages/numba-0.61.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..7d19426e7a09d04cef6dbd2a2857434036362ee1 --- /dev/null +++ b/lib/python3.10/site-packages/numba-0.61.0.dist-info/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2012, Anaconda, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/lib/python3.10/site-packages/rpds_py-0.23.1.dist-info/INSTALLER b/lib/python3.10/site-packages/rpds_py-0.23.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/lib/python3.10/site-packages/rpds_py-0.23.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/lib/python3.10/site-packages/triton-3.2.0.dist-info/INSTALLER b/lib/python3.10/site-packages/triton-3.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/lib/python3.10/site-packages/triton-3.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/lib/python3.10/site-packages/triton-3.2.0.dist-info/METADATA b/lib/python3.10/site-packages/triton-3.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3c296c8d892606cf442f810a616e3051849f9d7d --- /dev/null +++ b/lib/python3.10/site-packages/triton-3.2.0.dist-info/METADATA @@ -0,0 +1,39 @@ +Metadata-Version: 2.2 +Name: triton +Version: 3.2.0 +Summary: A language and compiler for custom Deep Learning operations +Home-page: https://github.com/triton-lang/triton/ +Author: Philippe Tillet +Author-email: phil@openai.com +Keywords: Compiler,Deep Learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Topic :: Software Development :: Build Tools +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Provides-Extra: build +Requires-Dist: cmake>=3.20; extra == "build" +Requires-Dist: lit; extra == "build" +Provides-Extra: tests +Requires-Dist: autopep8; extra == "tests" +Requires-Dist: flake8; extra == "tests" +Requires-Dist: isort; extra == "tests" +Requires-Dist: numpy; extra == "tests" +Requires-Dist: pytest; extra == "tests" +Requires-Dist: scipy>=1.7.1; extra == "tests" +Requires-Dist: llnl-hatchet; extra == "tests" +Provides-Extra: tutorials +Requires-Dist: matplotlib; extra == "tutorials" +Requires-Dist: pandas; extra == "tutorials" +Requires-Dist: tabulate; extra == "tutorials" +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: home-page +Dynamic: keywords +Dynamic: provides-extra +Dynamic: summary diff --git a/lib/python3.10/site-packages/triton-3.2.0.dist-info/RECORD b/lib/python3.10/site-packages/triton-3.2.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f794db1f73852fd146f37901605d31d7b4ad3e22 --- /dev/null +++ b/lib/python3.10/site-packages/triton-3.2.0.dist-info/RECORD @@ -0,0 +1,324 @@ +../../../bin/proton,sha256=3epEVP-nFKgCkBwERj8hUoUNwKxU0S8iCm_Pmg69nv4,333 +../../../bin/proton-viewer,sha256=xhGQhZ3l6xos8yNPb_XGpR532Jir3GtnCwounhtGbBM,333 +triton-3.2.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +triton-3.2.0.dist-info/METADATA,sha256=60U7BH_G-oh95wpQW9hoEjlXtYzxLHgSnwAgsQRqbVg,1423 +triton-3.2.0.dist-info/RECORD,, +triton-3.2.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +triton-3.2.0.dist-info/WHEEL,sha256=ViyZsTV2upbIniGkknQiIrLPLs1cJIoIfr1wsV7PMic,151 +triton-3.2.0.dist-info/entry_points.txt,sha256=SAiHYj5xxm1U5d8569PbMXmtWkKGNtiyy7LeTlUHalM,99 
+triton-3.2.0.dist-info/top_level.txt,sha256=cG17rIqlZ8ppA2uLVwUB95KPu9agPMZPuHkrMnQCppQ,227 +triton/_C/libproton.so,sha256=Y6DhcSfX-4eysyerrd6SqPqOpGL1eOgMYQts5jEZ6qY,18083272 +triton/_C/libtriton.so,sha256=tHhxn9zUvRdymyUEf9CA9ao8zWC7QzPrMjg1ijSkeVk,534321632 +triton/__init__.py,sha256=buC26NIOMT6cwmoYkc5jk46jb3fploY6_bOZYKwzdKQ,1347 +triton/_internal_testing.py,sha256=4pzyc_36u05khUveZ1TWL3MQ-7EVMJ1C2c1dRd8BMsw,4269 +triton/backends/__init__.py,sha256=opAo_vgEMt3tLO_bYFrYGksnIu0qohbmyuu_s3-rNAs,1595 +triton/backends/amd/compiler.py,sha256=0FnS5mBLsPB9FQzNr3I9BPd3-KEB1Do29hDsnFGu25k,16203 +triton/backends/amd/driver.c,sha256=obiiiPndny5NyhUcJ8iyrVHrXU1ruLpLGd_LgaKQEbU,8459 +triton/backends/amd/driver.py,sha256=leVYGX_wugGapdDc7o5hwMVduTrNRVp4XTa93FViVa8,18593 +triton/backends/amd/include/hip/amd_detail/amd_channel_descriptor.h,sha256=_2myGIdBTE0plFbGKOSx8HUqGZd0UBHo-YvKe2xkpbU,11708 +triton/backends/amd/include/hip/amd_detail/amd_device_functions.h,sha256=zfYTHJE_M_y2Y2ssP8ZH_EOczMBg4Iq2guglaKcI5js,31425 +triton/backends/amd/include/hip/amd_detail/amd_hip_atomic.h,sha256=PJRRTp83M0jIEBA_iWzfWwHZelSbL3TBrSDqlO3SQtk,49919 +triton/backends/amd/include/hip/amd_detail/amd_hip_bf16.h,sha256=fucv1_06JHVm82T0TmvERBbmtZTDQK6WJi_58oGQOXg,40634 +triton/backends/amd/include/hip/amd_detail/amd_hip_bfloat16.h,sha256=cFJlQEELGau_9geACeuiiFHyuAWCD6-VuSqcTnqajX0,9484 +triton/backends/amd/include/hip/amd_detail/amd_hip_common.h,sha256=dzkuIzuklqTRaNJjKLqfFEm6Fh4tK_FkTjYHFsZkmCI,1370 +triton/backends/amd/include/hip/amd_detail/amd_hip_complex.h,sha256=SEygl8X_MCXDVXxNIBm5Ds0eWwa-ojVXUUW48SIgsX8,5855 +triton/backends/amd/include/hip/amd_detail/amd_hip_cooperative_groups.h,sha256=SvrkniHiDGt-ztZRBvbkyajfUxTbGQzpZC1gnd4T-i8,31624 +triton/backends/amd/include/hip/amd_detail/amd_hip_fp16.h,sha256=86Nw97iaiC4QV5xBv8d3Bwc4FioMh5DQuCHj3sh_Yrw,57854 +triton/backends/amd/include/hip/amd_detail/amd_hip_gl_interop.h,sha256=9vxiV6rYRMGx12TPnrAVRvrfLyoRp74XRgKSPBPa2hk,3860 +triton/backends/amd/include/hip/amd_detail/amd_hip_math_constants.h,sha256=u1fIaf-AiWF70ZA1zxVkUIbRqoJLu5lrfYbgt_usySk,5890 +triton/backends/amd/include/hip/amd_detail/amd_hip_runtime.h,sha256=ZvDsQ0AiZnJ178NuAsA7AuHrySXbN3aFs5Z9m2tsIDg,13954 +triton/backends/amd/include/hip/amd_detail/amd_hip_runtime_pt_api.h,sha256=fc4mtHBkWmiSRh8m-dxIxvu9zsweLTwEgohkntYcgJw,9997 +triton/backends/amd/include/hip/amd_detail/amd_hip_unsafe_atomics.h,sha256=w9nJ1S32GRl_ejDiGacteM6Zf84iovIifAzWX8Bze0Q,24202 +triton/backends/amd/include/hip/amd_detail/amd_hip_vector_types.h,sha256=qPdmRJnzlgtjVshkafoHxdHoMLkoYS9U-ZD-TjLznr0,57088 +triton/backends/amd/include/hip/amd_detail/amd_math_functions.h,sha256=46wiaEMStCczEsHtccgHlATfw_0O5j6Z8rlFkC7bmUA,3171 +triton/backends/amd/include/hip/amd_detail/amd_surface_functions.h,sha256=rsQuylNqmNhLb7PZjBz7WbruD_6YIXtOptY2BNJDxVU,11062 +triton/backends/amd/include/hip/amd_detail/amd_warp_functions.h,sha256=p8DdtuxqlgGHzKdVPMHDnZOD8zA5f6GjLHYMr0_FKjQ,18966 +triton/backends/amd/include/hip/amd_detail/concepts.hpp,sha256=7EOkpr2w2-jclUQ115yxtFCkBWJ7btUzhBOe-mR0N0M,1252 +triton/backends/amd/include/hip/amd_detail/device_library_decls.h,sha256=4clSpgf898UVjfZFVnDkcYi75A27crPsuFtLcs1s4KU,7457 +triton/backends/amd/include/hip/amd_detail/functional_grid_launch.hpp,sha256=u7hRB9kQXX575a5C7cV3gKow55DSBUCwO0dTjIswlag,8129 +triton/backends/amd/include/hip/amd_detail/grid_launch.h,sha256=tNS7CQw9gy-z930CElH3n6c5iMvpsQ_WFZK024mNzEo,1830 
+triton/backends/amd/include/hip/amd_detail/grid_launch.hpp,sha256=EuAlM3olyrArebqwW5eSxo4gfjvWCGOAGAuLLmFttgw,1370 +triton/backends/amd/include/hip/amd_detail/grid_launch_GGL.hpp,sha256=KpQAuyy1Dyt45WcPaR_x-Ex-onPGEHA01DBbla7TT-k,1219 +triton/backends/amd/include/hip/amd_detail/helpers.hpp,sha256=hi2pW1mXQnbIwvmwWt_nG6A38sqLOd-QP5S9sETTs60,5707 +triton/backends/amd/include/hip/amd_detail/hip_api_trace.hpp,sha256=d01j4SFQP_6ALwUHByxznZV8SrQHbuujRYon8rxFw-I,94612 +triton/backends/amd/include/hip/amd_detail/hip_assert.h,sha256=fNsG23KISuY-k5JFoX-5hZ7qGQScisXuHcdEwYlXOqw,3978 +triton/backends/amd/include/hip/amd_detail/hip_cooperative_groups_helper.h,sha256=tQ_XIvGKhvrj1h7gY-IVLmKvIPhsQa0YsBflxdhUHP8,7957 +triton/backends/amd/include/hip/amd_detail/hip_fp16_gcc.h,sha256=BtFsKmTptN4TOHocEicfNbBl2JCdZWKm_bd5mc5OzYY,6660 +triton/backends/amd/include/hip/amd_detail/hip_fp16_math_fwd.h,sha256=63tKWMPdW56qWlH_HbCaF_isVXufm514ol_SxL4YjTQ,5134 +triton/backends/amd/include/hip/amd_detail/hip_ldg.h,sha256=KAEZb9H4z4DDrkaloMOeWzahiDfI2V6c68vWT3jb5fU,3652 +triton/backends/amd/include/hip/amd_detail/hip_prof_str.h,sha256=s1T2IrCwYzZQOuCs5ppuegFQbjXSF2JA1eUSCmZg9AA,621355 +triton/backends/amd/include/hip/amd_detail/hip_runtime_prof.h,sha256=6GVfh1la0wtBVwdKX5y0C32dPD9shJp1o8wZdHsjZHA,2715 +triton/backends/amd/include/hip/amd_detail/host_defines.h,sha256=h_ZpFE4Clm2iyRyJevDb57Y-gC-6RVPjhnZ5rzPxiUo,7038 +triton/backends/amd/include/hip/amd_detail/hsa_helpers.hpp,sha256=Os-sJQOFI_0Abh8Ql05s0Rtfruk4NsSMfg7BtugxMgg,3232 +triton/backends/amd/include/hip/amd_detail/macro_based_grid_launch.hpp,sha256=6ocsArNa9_R6D6XCuNy8Zq23KG-j2uYsjqNCtnMrJws,67925 +triton/backends/amd/include/hip/amd_detail/math_fwd.h,sha256=nup5YhceJnngoLJCESI8qX08dNpbZci0i78WKu-wfdI,17000 +triton/backends/amd/include/hip/amd_detail/ockl_image.h,sha256=LzRPGMb515_iIAIIcbb2uQB-bTvT4xOjY51VdARD7lc,10538 +triton/backends/amd/include/hip/amd_detail/program_state.hpp,sha256=8QE9OmB8OKTy7rBr3EYEizJI2s-_1tgXpgU7zCA2Ky0,3154 +triton/backends/amd/include/hip/amd_detail/texture_fetch_functions.h,sha256=Ex1lF2gBWJxtC3yP9pXRSFywMp3gbEmyl0Sw8iL91yM,17787 +triton/backends/amd/include/hip/amd_detail/texture_indirect_functions.h,sha256=KkW5o5gMpoVMTRwzfXHA7-kZ9ynI8OaIw6jJ1EB1s98,18447 +triton/backends/amd/include/hip/channel_descriptor.h,sha256=gTYe7SzIg-m3ThOQY2vr5Rh6-uWvUP_d37v8F4T2Q14,1773 +triton/backends/amd/include/hip/device_functions.h,sha256=vkybrdk6wyZP-T1I5PRjtfcMqGYXDeBpB5jhYj358GU,1589 +triton/backends/amd/include/hip/driver_types.h,sha256=m1HI80HC80qkTeco2Jd07woL_jTy48lz9JiDCV_8zsg,18985 +triton/backends/amd/include/hip/hip_bf16.h,sha256=lLw6K5ltb6AqSuINYTq8flxxsDkBP8Y2zbqmUjBcG9c,1571 +triton/backends/amd/include/hip/hip_bfloat16.h,sha256=Nqoy9VjfjglVx2_NJcp8hyT1sJUukXRWj8XMlidv1yA,1755 +triton/backends/amd/include/hip/hip_common.h,sha256=q5aPhG3DHW0iUJ7ayS5lfM_ZnZQNbMmLmfdHlOwbPdA,3450 +triton/backends/amd/include/hip/hip_complex.h,sha256=TmdzQP5oVPfhBVARJYcR5eyv9HInmKMFuFoQ_1ECk_I,1594 +triton/backends/amd/include/hip/hip_cooperative_groups.h,sha256=gMLvaYQ3b-f1vcoMtEwtkN0hO5__zNfP5p5oBKmv_SE,1878 +triton/backends/amd/include/hip/hip_deprecated.h,sha256=gFLuCuKn7R_xCfum_i_Q-vi3Lg8NWHKphKZKze8DwEo,6340 +triton/backends/amd/include/hip/hip_ext.h,sha256=jK1Qc-SXgUyRTj8bBa9ZP__95Qgd2-W1mwnJo6Qpnoo,8560 +triton/backends/amd/include/hip/hip_fp16.h,sha256=vKJh-zgDWUW7NyXxtv2ho6aVLXX8BIPfzCigEQ5d6I4,1523 +triton/backends/amd/include/hip/hip_gl_interop.h,sha256=-GwkSFMBneM8akFE7pqlhi0k-Ft2uz5674wGoiaU43Q,1438 
+triton/backends/amd/include/hip/hip_hcc.h,sha256=RYrArDlnTEP89xKbzIpW17_bsBY5moCitq00PL-4oWI,1307 +triton/backends/amd/include/hip/hip_math_constants.h,sha256=8bSfve5E7cDuvNAUkFUeQwSLg3iJJHuqhuD4FmHNxEM,1588 +triton/backends/amd/include/hip/hip_profile.h,sha256=sjsNuduu5Jd6s7sJndZvZLlE0RZ0wN1rTVwv5nR7If0,1304 +triton/backends/amd/include/hip/hip_runtime.h,sha256=uy90l8Nep6xNUzeGcHMoDv84BT3hMpieTV-5ijkpL5A,3058 +triton/backends/amd/include/hip/hip_runtime_api.h,sha256=fzb_xktisCVcp2pWG-ZKhIG-YVQzDjGyPt4wvA4iayM,386498 +triton/backends/amd/include/hip/hip_texture_types.h,sha256=AhkvjG4cDjf_ZFLg5SsSTfBnXG614PBK1XVPa7irZbk,1237 +triton/backends/amd/include/hip/hip_vector_types.h,sha256=6FcBMBkP3ZN1Enalpa9hV0VopxdBJvbUCuaxISgzbTY,1630 +triton/backends/amd/include/hip/hip_version.h,sha256=J3vgzfZH0UkK8RYvyHVj1PbUNSZH1JPtlcmXxLBgwVk,407 +triton/backends/amd/include/hip/hiprtc.h,sha256=npK6f2ZkYIe5blJIGuofuTG0PrSMS2mkFBUqrdOp0A0,15631 +triton/backends/amd/include/hip/library_types.h,sha256=tPOJTQedPH5qC9meawLgKpnbFrQC2WKlfo6s0rhKoZc,2370 +triton/backends/amd/include/hip/math_functions.h,sha256=frzdJ4veBG8n9ALO4EmRrdOiDguR6FP6ygLnvOnVVSM,1815 +triton/backends/amd/include/hip/surface_types.h,sha256=uQHjITphDM7k4pnuEoDEupMUxBobzvhJpSy0unpegh4,1959 +triton/backends/amd/include/hip/texture_types.h,sha256=CtmdykZfDikhnrVfdJk3w2VK5X3Af_6rEKzU-VgLu24,6687 +triton/backends/amd/include/hsa/Brig.h,sha256=5H-btCHq40qgjjpwVAoRWf3E0ccf-J6UCPEcKx_hGKw,32705 +triton/backends/amd/include/hsa/amd_hsa_common.h,sha256=q_zN0eq-dwR7FnQ84PcpV3yZyvjHsouIAjJgKltGoX8,3912 +triton/backends/amd/include/hsa/amd_hsa_elf.h,sha256=r3xymEjYeTIBCPvlKBDJxKyI1Dfg6KDXc5VqO9Uy1iM,16352 +triton/backends/amd/include/hsa/amd_hsa_kernel_code.h,sha256=C55F8a480QsW16-iwN9TIT3cKnGh6GoeoEaEv3aVh4g,12659 +triton/backends/amd/include/hsa/amd_hsa_queue.h,sha256=ZJ-k5wY30heLmQnGB0VUz36XCiVHRmspg5FRNMGIk_U,4766 +triton/backends/amd/include/hsa/amd_hsa_signal.h,sha256=FDegZnWQC04GtnqHjXOBsB-AoVSaqdhNY6Mwbua5FGA,2947 +triton/backends/amd/include/hsa/hsa.h,sha256=Jft1K5uFAcasOD9IYW6wD5GsGQcPQTrmbpjie-0Wh00,190916 +triton/backends/amd/include/hsa/hsa_amd_tool.h,sha256=pyZSyIVl-UA5AOhte78jvn4V3hCd0dxJAIv7KeADsPs,2843 +triton/backends/amd/include/hsa/hsa_api_trace.h,sha256=2iuwHcpyW9wvr-WPKCgatQzYBaA8rTa3w1BRMXBGcSI,28982 +triton/backends/amd/include/hsa/hsa_ext_amd.h,sha256=Riw3Ii-AYts1w_yjVD96ZXuY6-BBpnlx_bnnltThK1s,116016 +triton/backends/amd/include/hsa/hsa_ext_finalize.h,sha256=sv0AZbDM-B1wIdQ3cHTMlpUtNacQN2PkOgX90IZol_o,20227 +triton/backends/amd/include/hsa/hsa_ext_image.h,sha256=t5YJm_aw9EePCeFL1hoIfQ8ubIjBte-ptfReq6Ts-8Y,54232 +triton/backends/amd/include/hsa/hsa_ven_amd_aqlprofile.h,sha256=9uev2nT29MCdu7-HMkg9iItHop6QMOBMQL5DAFnftSg,19777 +triton/backends/amd/include/hsa/hsa_ven_amd_loader.h,sha256=c6cxPAzAox7u6IbFzEkQZfCuRl-Kr39WhY2_w23X1R4,26146 +triton/backends/amd/include/roctracer/ext/prof_protocol.h,sha256=6FAcvVD-dNM7uulFs2B-aTxw5xOAWGy6evdD4yUaebA,3849 +triton/backends/amd/include/roctracer/hip_ostream_ops.h,sha256=WNXFZxawBXHmFGMDFIOZqXkCw6VzyDexwGPkGJre4w0,184840 +triton/backends/amd/include/roctracer/hsa_ostream_ops.h,sha256=AYwF-IT9Dhl2FX-GuvCJZX6fSmHK0xkKLORx9QxuSK8,57857 +triton/backends/amd/include/roctracer/hsa_prof_str.h,sha256=ctT-KKsIGayp7RUGUsFNR-dE65VydyXla_Qgvf-efTU,122884 +triton/backends/amd/include/roctracer/roctracer.h,sha256=B8sHz2DMNprP7EqNWIGwVLY1KQMpxmhfVy4UoR8dzzY,23849 +triton/backends/amd/include/roctracer/roctracer_ext.h,sha256=vLaZ8peAxSy0cwrdEalKnUApkKspfa04iw1Mr_Zcio0,2940 
+triton/backends/amd/include/roctracer/roctracer_hcc.h,sha256=NlF3R8JQ9oX9lGpm0b2n-EWJ0r3y9sP9wbwnoucaCuY,1303 +triton/backends/amd/include/roctracer/roctracer_hip.h,sha256=RCzYuNw1vLR7xK4rb06TtM9TU546UYKHJ83IMHmZEm8,1432 +triton/backends/amd/include/roctracer/roctracer_hsa.h,sha256=M8APM64XNAWSslxQisM-pcmKoUQaUdTMaKvSACyt0Ag,4108 +triton/backends/amd/include/roctracer/roctracer_plugin.h,sha256=8GGE1zDbdPCVJtbmwOCYq7X0mwFjfWRtzDYKLD4cKys,4786 +triton/backends/amd/include/roctracer/roctracer_roctx.h,sha256=gBjBk5vb0l3PbBSQ7V9iFtaM_RzkIDJEW1A_PXBihBM,2014 +triton/backends/amd/include/roctracer/roctx.h,sha256=RhJXUXRhSJ5LRE_1gm7E6-bjEMrfcFBLDLuf3UxAIh8,6717 +triton/backends/amd/lib/ockl.bc,sha256=wQKCzkKukIHbu0lyjKUYlhndc7S27xto6L54J0Bn-C0,246124 +triton/backends/amd/lib/ocml.bc,sha256=UPNTXW0gCXUNB-c6orSYwb-mz9_mjUc7zny_vfFza44,205964 +triton/backends/compiler.py,sha256=JZiiEbB9Wws3tjU6KXrydKtlOQI7Suk-mTYPlafa0Qk,11388 +triton/backends/driver.py,sha256=QX_6P1Go9ajdlHZi4Hv3nCtdHyDA6o8_lM3NMnlH1mk,1386 +triton/backends/nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +triton/backends/nvidia/bin/cuobjdump,sha256=FLKFErTLe_YgWmaukj-B8lkDrW6il4BbWWX2S0X_b1s,663040 +triton/backends/nvidia/bin/nvdisasm,sha256=rwo7W-VxMOzwUKMQdn01SkxzCzCjvzuIwQDcPJvL6-o,50683112 +triton/backends/nvidia/bin/ptxas,sha256=lN2lShZzlA1W0wcsZO96rLEloeZDlFhuEPd6el_w_4c,30314080 +triton/backends/nvidia/compiler.py,sha256=6o2KR0Rnm7QGuRRIWxdDZp62hfJJSc0hD4-3LBA9LkY,16095 +triton/backends/nvidia/driver.c,sha256=q4oIpkjOtdHHfi8xBkm4jC4JWIk5AjKtN8WRkZb8MD8,17300 +triton/backends/nvidia/driver.py,sha256=dHteVmBJrGaxu7KKl6PbzidGDdEYPUO2Y7PqX-399DY,16371 +triton/backends/nvidia/include/Openacc/cupti_openacc.h,sha256=Z0OM5e_hbd3cxdXyn3SCHqBBQawLg4QORnlm57Cr2-M,3513 +triton/backends/nvidia/include/Openmp/cupti_openmp.h,sha256=E1WNmeb_7HaUSmBegtUNe4IV1i7pXeNxgzIlyKn1zrM,3491 +triton/backends/nvidia/include/Openmp/omp-tools.h,sha256=AmuC_xPC7VPu3B-W4PmXuCNufFawhY8PjNXePaQFAOg,37403 +triton/backends/nvidia/include/builtin_types.h,sha256=JxT9Vf2q2snxTBOL9ACzNmYzTWACO2VOVUu1KdFt7_g,3150 +triton/backends/nvidia/include/channel_descriptor.h,sha256=no_vNky02LeMLI0CF8GDVGHaPm_uRUGcVUMYdt_Xn4U,21482 +triton/backends/nvidia/include/common_functions.h,sha256=22LTZRVcPZzEH6MJda7nNMCvMgIjSTe0OKR7sEQj6kc,3410 +triton/backends/nvidia/include/cooperative_groups.h,sha256=JUBW-C1x_7WWuNOaoorTKQab0qzrykkG8oAw1mEHZ2s,60332 +triton/backends/nvidia/include/cooperative_groups/details/async.h,sha256=xsEHCZP3nuEY3l2p8SU2d1226XiXumUvDP_Gyh8PdVY,19122 +triton/backends/nvidia/include/cooperative_groups/details/coalesced_reduce.h,sha256=pBQgFY7i64V87XNATg1UEIQHVNYOItQtHjS5B4yn8pc,4257 +triton/backends/nvidia/include/cooperative_groups/details/coalesced_scan.h,sha256=DfZv5d5W0XJv-tZVhgrIdjLjs6aCx_u0oy1lDIpjo1Q,7314 +triton/backends/nvidia/include/cooperative_groups/details/driver_abi.h,sha256=v-ZUb4UgGKJk6NR2WCWHD3x_42y-togI1urFn70Gi-g,3964 +triton/backends/nvidia/include/cooperative_groups/details/functional.h,sha256=2BV8i8Bidz0kgxuYkJCAbwFxOIZRyzHgG-c_rVKhRzc,8905 +triton/backends/nvidia/include/cooperative_groups/details/helpers.h,sha256=K9jvxnXc5-6Fum1KG4EQKJJrVZ4BhHOSAJbZR4uDL0c,26476 +triton/backends/nvidia/include/cooperative_groups/details/info.h,sha256=Ij_cqIrcXCcwlaQqCL7AHzMD4H89y0tJeQXCbjTGsFo,12578 +triton/backends/nvidia/include/cooperative_groups/details/invoke.h,sha256=Osq3K-tZuXHVCMQJ708PjPo-BwMhjhjApO4b0TYLFJg,8616 
+triton/backends/nvidia/include/cooperative_groups/details/memory.h,sha256=WU28eUcYLA1z131VYGulR4eVCSN9xK9KSxbV656YPs0,5484 +triton/backends/nvidia/include/cooperative_groups/details/partitioning.h,sha256=4UXuvUmZvGANy0hd4erdBNllpgnn4K4qFWWlfzAsHO8,7125 +triton/backends/nvidia/include/cooperative_groups/details/reduce.h,sha256=UfMezM5pqRIotJjmuFgOmiMvbu49sYgjraHutmVVr0w,22111 +triton/backends/nvidia/include/cooperative_groups/details/scan.h,sha256=-Ttwb2AfEEY_tsmqJjR2dojkPpoRx387SoqxgvfdBtQ,17166 +triton/backends/nvidia/include/cooperative_groups/details/sync.h,sha256=zoiBicvB7rlXa_r_VSNuvHVwrLIM7EjF_KdmhvPj1LM,10638 +triton/backends/nvidia/include/cooperative_groups/memcpy_async.h,sha256=erOIHuObdfxRhBWfrXE3wsZF4B2GUuqwzQrsPwKPpbg,2960 +triton/backends/nvidia/include/cooperative_groups/reduce.h,sha256=B0hgDkqM-6ueqTTgb3b34A0RH4vGz8mBf5e2jT1dJ1o,2949 +triton/backends/nvidia/include/cooperative_groups/scan.h,sha256=2EU6T5cWNwftm2B7FicV31PojoI61yo5fHXGRYkGk40,2940 +triton/backends/nvidia/include/crt/common_functions.h,sha256=-U44f4yUGmwDPwd7Q_3Cz5if05xHGPSlAzz5zMylLSQ,13559 +triton/backends/nvidia/include/crt/cudacc_ext.h,sha256=KW6n0ImOZKS0VqVmBHWTXtHI816hh88YeEgUg2aYdVU,3224 +triton/backends/nvidia/include/crt/device_double_functions.h,sha256=A1vB3g0qwnNEfcpT1d9RiGDaxqPXXgYr-Vxe2oMHyxY,39938 +triton/backends/nvidia/include/crt/device_double_functions.hpp,sha256=YYIbqYhb5Qmf8c4YfcC_jytg4FRwcXPjv3TFTwhb24E,8568 +triton/backends/nvidia/include/crt/device_functions.h,sha256=txuWyo2qoqRZTomi3BSjwUbFvtD9Ea0WKamRgMFQzjQ,136370 +triton/backends/nvidia/include/crt/device_functions.hpp,sha256=9BxQiHjRuETOIntxXAlmTPKp8wlXrBKTPcBaSUQmwfQ,38985 +triton/backends/nvidia/include/crt/func_macro.h,sha256=EOpDlaM917bh9cwBiFBPF689DCMBw5hFarxLxFt-i74,1755 +triton/backends/nvidia/include/crt/host_config.h,sha256=ZnNRtvunIV0ctARy5qbTC1fa5-JpSK5eZ5u5SCcu_BM,12169 +triton/backends/nvidia/include/crt/host_defines.h,sha256=agpWQb4K25fhOP_RsrIuz1L_vPeC2AkbmJY12QgpXKc,9950 +triton/backends/nvidia/include/crt/host_runtime.h,sha256=lOpmkxFZVkEp8dcMAGEZRITsh-19o9jy39kdSNLc3Ng,10284 +triton/backends/nvidia/include/crt/math_functions.h,sha256=iYVBIFDocDsPxqaeKHeeTxAsY-zf04-zfkmETyeahuc,396266 +triton/backends/nvidia/include/crt/math_functions.hpp,sha256=u-CGbd0R2FZWdKG-6bdmGSor9KT_wnmISj63lPQKASM,100207 +triton/backends/nvidia/include/crt/mma.h,sha256=BgSSvJ_IR75W-3uLlC2yE6B7rHeWtamaNn6-XzYU73U,62564 +triton/backends/nvidia/include/crt/mma.hpp,sha256=spo0LX71tUCipxK517Bssj0nc-ZHf8oMWzvHoYYB_6I,66599 +triton/backends/nvidia/include/crt/nvfunctional,sha256=FDM0zqWO6bl9jpJKz9U8CMbjt6iTKh18tQalxAvRsag,16900 +triton/backends/nvidia/include/crt/sm_70_rt.h,sha256=Kf830xymA-zmF7LsunFHLSNyhhT5UiJMocgoHBQeNns,6837 +triton/backends/nvidia/include/crt/sm_70_rt.hpp,sha256=3a_rU-Y0MSB4htBDFY4PCQ_jXiWFTe7WT1ZyhMuCJOA,7837 +triton/backends/nvidia/include/crt/sm_80_rt.h,sha256=MdJHWCRzLM__nDDf1go61rDsl9ydOW3oi6SZBfjUyc8,7743 +triton/backends/nvidia/include/crt/sm_80_rt.hpp,sha256=o-rJu-jpehCeyABGgv-8dYRB7oJTCwuNdvSCq0VURdE,6705 +triton/backends/nvidia/include/crt/sm_90_rt.h,sha256=an47m0XFBaJ3pUX9MlE4-nktP1jb3eJUXhQ3ntZtzc8,11445 +triton/backends/nvidia/include/crt/sm_90_rt.hpp,sha256=YuqVygGV6rgtWtx1J9cPpEI3BXKQBII-Ez6oZFP3wrE,9228 +triton/backends/nvidia/include/crt/storage_class.h,sha256=dzcOZ16pLaN8ejqHaXw4iHbBJ6fXWxfaU-sj2QjYzzg,4791 +triton/backends/nvidia/include/cuComplex.h,sha256=WpcgpaiPhU_o9sTPMcNTEZuyXDIc8x3sz4dUWSztL2g,12186 +triton/backends/nvidia/include/cuda.h,sha256=29OuNnfs8Hb2sqCXHUKy3VudXxzN8050d0oW_C33ysE,1048458 
+triton/backends/nvidia/include/cudaEGL.h,sha256=_CwaQ4cEP1vfNyBSSd5qFxznPCYOovF6Cpj-QWSIBq4,39544 +triton/backends/nvidia/include/cudaEGLTypedefs.h,sha256=xF_FAN1Kar9oyHJ3cCU7jztTpxX8WylpiuYyYpGGHek,5645 +triton/backends/nvidia/include/cudaGL.h,sha256=gMT1HPGa-siuji0gAsKYr4X45Lc29HKglC_ttNSGyUM,22501 +triton/backends/nvidia/include/cudaGLTypedefs.h,sha256=dClpQI-LuXgF9rPSBsj7OkIg8g_fXDjT0hLZS8TGpOg,6576 +triton/backends/nvidia/include/cudaProfilerTypedefs.h,sha256=F2aWLIKv_AhNbxNOaZVcRsxIh0kuscnV8UMWWxkBAlY,3297 +triton/backends/nvidia/include/cudaTypedefs.h,sha256=0hWYyV-KM7R5Qjagz9UP1ldhAZDHGIcJmYtYvB_nwNc,110387 +triton/backends/nvidia/include/cudaVDPAU.h,sha256=Np7Nc2Wjaz--hkpbhW6f9aapr-NbcPDAgkot0sJerco,12694 +triton/backends/nvidia/include/cudaVDPAUTypedefs.h,sha256=wz8nyOUdwM9mH9JO3QZW-A9dyxt-IufSX7nggSXpCNs,4144 +triton/backends/nvidia/include/cuda_awbarrier.h,sha256=3ZH-ZlXODhSiwSY9rqSni_EQwi25QMHP6Tm-zOdxBwE,9340 +triton/backends/nvidia/include/cuda_awbarrier_helpers.h,sha256=OCskCts5bCKl_RKBe9M74zKSIsVpePn44S_aJp1tFXE,12489 +triton/backends/nvidia/include/cuda_awbarrier_primitives.h,sha256=n5__E1jYYDhlgH-f3u8MQjtz57UZ7v5VshhMye1eicM,4699 +triton/backends/nvidia/include/cuda_bf16.h,sha256=2BKEN_8pbieiBHShSfIawa-Oy_3jJzQAl74TqoLQ3MQ,185707 +triton/backends/nvidia/include/cuda_bf16.hpp,sha256=ZJlZSkQJ65G0yhMPDAq3m-oMaEJ3ia9FOsbgnzCtPS0,137924 +triton/backends/nvidia/include/cuda_device_runtime_api.h,sha256=bIhfusirXe5-osOTPAILDh6pY8MW1hefyZvTD_IzgqM,46249 +triton/backends/nvidia/include/cuda_egl_interop.h,sha256=PNWYns30MIytJQHSOh7UbZYlaTX5e0bavzK14tde_C8,37109 +triton/backends/nvidia/include/cuda_fp16.h,sha256=1J7SldpmJk8SNDGD3SO0yVrsLoHkpN1VnMtRZr2Gbcs,175974 +triton/backends/nvidia/include/cuda_fp16.hpp,sha256=JyedVIUALPBiR_Ci3Rxef_sUs9VvDiP4MDc97Yk_Ys8,123259 +triton/backends/nvidia/include/cuda_fp8.h,sha256=Q3OP5o_3rSYbKtVIlcXVr_CncU3SPM-09j605e2Zegw,13833 +triton/backends/nvidia/include/cuda_fp8.hpp,sha256=b-PcyZgei5MmIp6op0QQ40BgNupO_ei648hG_dUS-FQ,64246 +triton/backends/nvidia/include/cuda_gl_interop.h,sha256=VQEswFeOBF6JN6Q0pdlkvc5WT7bD1FnTfKewvANulCc,19150 +triton/backends/nvidia/include/cuda_occupancy.h,sha256=Kr9HyOe-hlRjBAzbINwUYkNgbbIgIjuvKs09UZhMYQo,67179 +triton/backends/nvidia/include/cuda_pipeline.h,sha256=0enXG49wN4JajlQi3ahbp2ei_ufTY_Mznic7zfWmKHM,8130 +triton/backends/nvidia/include/cuda_pipeline_helpers.h,sha256=bo1L7e6vCuM-K3Il8K1z4wJUja5DyXQKdo_hSWUME-E,13852 +triton/backends/nvidia/include/cuda_pipeline_primitives.h,sha256=FnJJtuV6rHr6LgL56XDwilcSbFr6W1Hj6mf1AJaMI20,8675 +triton/backends/nvidia/include/cuda_runtime.h,sha256=a-OXWPsmKSPst7mRCCxHNZV7m-uRLCAY8oGRi-dJzPA,90683 +triton/backends/nvidia/include/cuda_runtime_api.h,sha256=7Ys9yv_2trFEVybtbh-UJKnDKG8fHWvUjSX4cgZGCck,608580 +triton/backends/nvidia/include/cuda_stdint.h,sha256=XbFOk9CtJjKqk7PpYNqbSVsDxAsVM8avA4rWpPi0BjQ,4093 +triton/backends/nvidia/include/cuda_surface_types.h,sha256=Mw5Lo4b8Q-f9mogOvATGyHhu9d2t2K6XOxuqtZrSh3A,3688 +triton/backends/nvidia/include/cuda_texture_types.h,sha256=ITbX-JNnP7Rm-JSgNVdJ9pq6k8FVor8RbnruDsKq6sk,3688 +triton/backends/nvidia/include/cuda_vdpau_interop.h,sha256=bXQanWc2IFXZAKWNGl2xAz9nLvFmQpWyGrsDvfeS9FA,7727 +triton/backends/nvidia/include/cudart_platform.h,sha256=YN6sKhB0b9w5tGX1IYL7ulJVPrWAiX9A44qLv4EtW5Q,2717 +triton/backends/nvidia/include/cupti.h,sha256=JkVyAGTIMYzwm62dfVqas3nMcILhgP_Wdz6fh4_NED0,4697 +triton/backends/nvidia/include/cupti_activity.h,sha256=1aNI_zmQnjAguMBU0UqqMR_heE77FiafQkZl9or_1Ww,210387 
+triton/backends/nvidia/include/cupti_activity_deprecated.h,sha256=rYJsoAJxA2BTT50-olN8EYcSzdlXBpRbR1ATLG3rVIM,121526 +triton/backends/nvidia/include/cupti_callbacks.h,sha256=zrEVRb0hubSfD69QUmHsJiL8oAfvqyuKGcTVRihQrnc,29729 +triton/backends/nvidia/include/cupti_checkpoint.h,sha256=rTz8JoWxqESBXyZWUhZJGm4xeYcx4OJOtJ7Ld13T_b0,5264 +triton/backends/nvidia/include/cupti_common.h,sha256=85m74bxUgXp3tEaPQpezeazmpsNMw41PsjNSYmQdT20,3514 +triton/backends/nvidia/include/cupti_driver_cbid.h,sha256=dHKyQYZbBbdlxixzFkIoNHg5IfGXdgriyjN1Bu1i6g4,74462 +triton/backends/nvidia/include/cupti_events.h,sha256=f7lLGmD2e8FzvMhRgnn0-v7U0vTpUkiQHIpQxgARGb0,51896 +triton/backends/nvidia/include/cupti_metrics.h,sha256=iLAOlDrcbHEsIIUmgq0Tp1ZOY9O3Ot3wj2-bI8iYbSs,32148 +triton/backends/nvidia/include/cupti_nvtx_cbid.h,sha256=_azPtR1g4qivvX7qbvHRUg0RHCWF7iEOJyHMN9qZe9E,5912 +triton/backends/nvidia/include/cupti_pcsampling.h,sha256=ycJHT36DmPIaVzHsB3xxjXkhFyEfMCJOl3LbCsHFgyA,32144 +triton/backends/nvidia/include/cupti_pcsampling_util.h,sha256=lx8CaNXowJe5Zvc06LE-u_Zry_jODs1mM6j9Q5WIX9E,12430 +triton/backends/nvidia/include/cupti_profiler_target.h,sha256=JsceoDuhllWNEzaO0xxT81dJ55NrbF0UtRJJgit0P_E,32131 +triton/backends/nvidia/include/cupti_result.h,sha256=a-C4Y7LAYCiCT1ngOfoDuTi2stEG1YTafwwn6UfL-LU,12603 +triton/backends/nvidia/include/cupti_runtime_cbid.h,sha256=11pXl0MdmTtxUngel-ru4JdqWvF_gEIG14aQExRyfzI,46436 +triton/backends/nvidia/include/cupti_sass_metrics.h,sha256=3RW9snJuFQdOhrEn3wDJOru05q0V_zssWrqD7tvVJKw,19674 +triton/backends/nvidia/include/cupti_target.h,sha256=x4Vz1Upb6m9ixmVpmGaKQldDWYQI3OZ-ocEXGzNK0EE,1263 +triton/backends/nvidia/include/cupti_version.h,sha256=sjd-aUoTGkEWyvA2VUWIpZwXyXAaclqC8gbwNnuK5D0,4425 +triton/backends/nvidia/include/device_atomic_functions.h,sha256=OR2jNSfSKzaFri74zh4Vtz5M0z9UDBU3rKeC1rYaVQs,9500 +triton/backends/nvidia/include/device_atomic_functions.hpp,sha256=0e7MOiNNUnnloXpB_r9WT5YOws5cxgzQQAzRCYvgaFA,10486 +triton/backends/nvidia/include/device_double_functions.h,sha256=KUxId5Z1fx8SWfLRTxPD7RB-zN7zslzb4n7JaJLfL3I,3452 +triton/backends/nvidia/include/device_functions.h,sha256=bWSrhTYE9NQlss7xMSMEVusvto9j2fgUDXWVH2W_cOA,3410 +triton/backends/nvidia/include/device_launch_parameters.h,sha256=H1_CC-vvAaS26ys4XsTFkMgTxUTciAjdjswjizkisvQ,3846 +triton/backends/nvidia/include/device_types.h,sha256=2LFxoZBJPoA5V0H1EbKTEaXDi3GDJPtzOPdRHDaucIQ,3588 +triton/backends/nvidia/include/driver_functions.h,sha256=cN3IjRAz2Mj2Pj35SyxJIkZNDDusnJqaqzBdMzpQKbA,4625 +triton/backends/nvidia/include/driver_types.h,sha256=4eBQ10Nzgfs2BlxGaGHVMWLvnJfKrEnMml9zfFi0DyA,177782 +triton/backends/nvidia/include/fatbinary_section.h,sha256=NnuUfy358yGJx4enq0pBnetjv17UWa-nOlgYToUitrw,1809 +triton/backends/nvidia/include/generated_cudaGL_meta.h,sha256=dfd2QuaRdEjbStOKvaQLi1Md_qrpRQh8PfyZznJ8bWY,3115 +triton/backends/nvidia/include/generated_cudaVDPAU_meta.h,sha256=fAedsoQxaU3hIAApAWDOKsa9kgcuQw4tdyf8klLm-3k,1453 +triton/backends/nvidia/include/generated_cuda_gl_interop_meta.h,sha256=LXOqvQCej0sCgAT1LUKKYZ466EFxN4hIwf9oIhXOLF0,2250 +triton/backends/nvidia/include/generated_cuda_meta.h,sha256=hawYpDe0xpaDFDnClXI91JjwCRxWb-AS0FS8ydUMgxc,94639 +triton/backends/nvidia/include/generated_cuda_runtime_api_meta.h,sha256=D8CbAN3-jLuF2KGfsBHXEELSgL92KrUAiDvugWE8B8M,69706 +triton/backends/nvidia/include/generated_cuda_vdpau_interop_meta.h,sha256=8OLqWN26aEYpTWUXtbHJvA5GYhVv3ybYVOTW7yK37z8,1367 +triton/backends/nvidia/include/generated_cudart_removed_meta.h,sha256=X3I5WXmhtsJNNlgY7coJ5vg4t11G5FRR6Xo7MboIeck,5172 
+triton/backends/nvidia/include/generated_nvtx_meta.h,sha256=YHb_RD8g3s4m8PJn7Z0wnxvUHarl7BOAX5ADr-BL3HI,7513 +triton/backends/nvidia/include/host_config.h,sha256=BscH_GazAZbbotddVzL5RmafbQ-QjRx8f-I1O01IBW8,3380 +triton/backends/nvidia/include/host_defines.h,sha256=bBQwQF5C1N1c2qpLV56g1c-weu9Ysgz-gIf2Kn3uz_A,3386 +triton/backends/nvidia/include/library_types.h,sha256=p6746aCd_A_1VlgKRhLJChzeZ4tN7e4HBH2Hm7hDjbU,4836 +triton/backends/nvidia/include/math_constants.h,sha256=cV6hAyQe8X7f7MBtaKjjIJq3BycOUDp6I5cizJX5HLw,7608 +triton/backends/nvidia/include/math_functions.h,sha256=5XcC6j-fJKttvhwc4hZNoLHNw808a2ZYIOtZ7ry7yd0,3398 +triton/backends/nvidia/include/mma.h,sha256=IY_VenxuEncwGq92MhrWUb-Xswh0ekAXLy9Rbxhxa2Y,2932 +triton/backends/nvidia/include/nvPTXCompiler.h,sha256=z_v0P6Sj0KfDQBmAKIdgFoPOylhsO4B221w3KDUqbM0,12076 +triton/backends/nvidia/include/nvfunctional,sha256=IkFoCi_Q4OhP9nEuBI-5jWwFlR_PfG05hJH7lSMsfWc,2975 +triton/backends/nvidia/include/nvperf_common.h,sha256=BqPml9AxyN10-ptWT3hQzh2JUWqQX57Q5BjQ3ZuaKNs,17255 +triton/backends/nvidia/include/nvperf_cuda_host.h,sha256=aBnyIr_hexPDGBkP6WSujN1mI_DYP25sEIXWYY1O7VI,8298 +triton/backends/nvidia/include/nvperf_host.h,sha256=afdHG6eraeo4ltlF9ihskqhU7IccxcRCaZDZ6_ikjkg,68506 +triton/backends/nvidia/include/nvperf_target.h,sha256=ZDA-JI459tLBW4iLLCQjYYRAMeHwfqDIgXbVqVLDYZ4,22539 +triton/backends/nvidia/include/sm_20_atomic_functions.h,sha256=x4ycINVq__l9B4SQPD-I48jQbKxxdBmgp8Vf2GO0Qfg,4478 +triton/backends/nvidia/include/sm_20_atomic_functions.hpp,sha256=1l5NLM8DhDbqYZ_E51LoqElQJXObkbwo57d3r-4uEbE,4107 +triton/backends/nvidia/include/sm_20_intrinsics.h,sha256=a4jDSp_DUW0d09g5wgEm_I7bGTAe73HKRinkhBKQBis,51048 +triton/backends/nvidia/include/sm_20_intrinsics.hpp,sha256=BhEBuXSKBsNGJDBJDtYL0cGRI3wX_w_OIgA5D-YxIWk,7694 +triton/backends/nvidia/include/sm_30_intrinsics.h,sha256=b6W8Vxp9vD9OCJI6lZuGyZYXEdQ3Ei8PTAloHNkwCcQ,16978 +triton/backends/nvidia/include/sm_30_intrinsics.hpp,sha256=yX0ebd265tJ-BDhvluP2BhadPuWXpRZPI2eeQFFt5ys,24567 +triton/backends/nvidia/include/sm_32_atomic_functions.h,sha256=HGnZgQHACE2AAb6zabGUURc53IsVZelc2BSJqvs9OgY,5703 +triton/backends/nvidia/include/sm_32_atomic_functions.hpp,sha256=CQTTvOEYp-s5hqAgLvAon11vLYDrDp8cTHdel-XRzBQ,6592 +triton/backends/nvidia/include/sm_32_intrinsics.h,sha256=Xdkogdsjy1vh8u3eGu0i5xTmHxBGAjj6_vVGR-spdOE,33539 +triton/backends/nvidia/include/sm_32_intrinsics.hpp,sha256=Gl8aSLDLcit4W3pKQS19GsDG8RYcwD65HwYB_CeZe8M,70616 +triton/backends/nvidia/include/sm_35_atomic_functions.h,sha256=a3XoEsKRCEOf0Q_5Y__rMfmC4pScv4VkUggVgVJVn44,2909 +triton/backends/nvidia/include/sm_35_intrinsics.h,sha256=0mS5-LCgvZiTvL7-MG_4YwI-zWGvM-s4xyRuMkunMC8,2664 +triton/backends/nvidia/include/sm_60_atomic_functions.h,sha256=_anfNaJsvQpDEorYeUKIkbizYkwrinBcG_ZCiECtLqI,13178 +triton/backends/nvidia/include/sm_60_atomic_functions.hpp,sha256=cgIKddDn2B3QzYlzeBILAP1IRys74QCCxsH0QqaVGls,22903 +triton/backends/nvidia/include/sm_61_intrinsics.h,sha256=h_MBL1UUDxQX_qOddSImzqyFjcrhhm_63G97pGDyreU,10902 +triton/backends/nvidia/include/sm_61_intrinsics.hpp,sha256=N-nQvcBsPMT2Umy5zR69c9K1q366W-Jqe7NpoLTqTmg,6787 +triton/backends/nvidia/include/surface_functions.h,sha256=b1O82SAvEgWWxA9uZTWQcGimzZUoem2QbAET3wh3fZc,6782 +triton/backends/nvidia/include/surface_indirect_functions.h,sha256=vy9QuFVV-ezZP-x2RT9RLp2qIUgdngACOCmalSfVFPA,10877 +triton/backends/nvidia/include/surface_types.h,sha256=XkFXD1nHbeSMgajR-UJE9uQ7TByzJnjdnUL4-yGiufk,4530 
+triton/backends/nvidia/include/texture_fetch_functions.h,sha256=KLCmUxf5aY5_UalX8tSFB6e4TrjA8hyUPxLOkMFltAo,12468 +triton/backends/nvidia/include/texture_indirect_functions.h,sha256=lH_y3Ni-hq4RZ0_PMFbBM0th5-OmTn3TtqtpkHHhA8w,21163 +triton/backends/nvidia/include/texture_types.h,sha256=73ntVyg8r8fzKy5VIk6yuvC45GDeWepaLIqIk-M3Ri8,6360 +triton/backends/nvidia/include/vector_functions.h,sha256=WypGkL-IDbGOlay7g_G0p3HO7OLGRE0Do__JtiFoWxY,8003 +triton/backends/nvidia/include/vector_functions.hpp,sha256=afXhNSd3LFTZo96EPtesTLfvxd4nTmLVzgkj967rTRg,10060 +triton/backends/nvidia/include/vector_types.h,sha256=6CJ4yt3KD7zQVfm1NhrgqNYYEDEIZWwaivlFx12nhNg,13396 +triton/backends/nvidia/lib/cupti/libcheckpoint.so,sha256=EGsm1PpJorzbPRR1EiWN4r_itT9gggP-hWFf7-vNc_4,1507009 +triton/backends/nvidia/lib/cupti/libcupti.so,sha256=mc-9PurtJImbLjpzvErDJT04BaJcQMBe6MLIf17HgWQ,7756057 +triton/backends/nvidia/lib/cupti/libcupti.so.12,sha256=mc-9PurtJImbLjpzvErDJT04BaJcQMBe6MLIf17HgWQ,7756057 +triton/backends/nvidia/lib/cupti/libcupti.so.2024.1.0,sha256=mc-9PurtJImbLjpzvErDJT04BaJcQMBe6MLIf17HgWQ,7756057 +triton/backends/nvidia/lib/cupti/libnvperf_host.so,sha256=hd_RMT1D7g6B3q9auwMqgn05ylkAvIf35MmpDVG9hGo,28159049 +triton/backends/nvidia/lib/cupti/libnvperf_target.so,sha256=p9-QHrZfD0PViRXSqL4WxENogAheR9_4ELS-1NSR-6k,5609441 +triton/backends/nvidia/lib/cupti/libpcsamplingutil.so,sha256=2ayD-Sns2y2Fcb-ZwWu8_50wCJvKddviHBEqm4hACHI,916321 +triton/backends/nvidia/lib/libdevice.10.bc,sha256=XC-uN8huaMOjhgWpX1EtfRLV89uYYxC-R_VzBKpype4,473728 +triton/compiler/__init__.py,sha256=kSVpmv2ro25zaF-fVJcpeyxMpRRb5uCiXQo7DqhG1CQ,239 +triton/compiler/code_generator.py,sha256=g80O73MyM9acg7XNfcHgrq6I4bNgEd5_LY7gYp3vPsE,57574 +triton/compiler/compiler.py,sha256=Oazjlobciua1N9FK4UBXUkK1gzdGtRnm4ymzVfXZ42Q,17481 +triton/compiler/errors.py,sha256=I9Y15pDWcL9heY4SWWdLeMDtW6Iiq2pFXzKfJ6dY_C0,1732 +triton/compiler/make_launcher.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +triton/errors.py,sha256=8WfnuRKLG578mgY6cBA3ECruVMf9ULEKFNgRcJ6IhWM,89 +triton/instrumentation/libGPUInstrumentationTestLib.so,sha256=BU6ZPuWdF7OoobCmlWXgmJsLYVo325LmwKjZ115v6ao,12379168 +triton/language/__init__.py,sha256=jGmSXwL_gpfWobg5qwlU124HNi_aLX5cNgx_Y9NiBX0,4852 +triton/language/_utils.py,sha256=bkp98MH2y3mfSI7h1u_T33VPwYqsbnIJkjuwIsNsfE4,646 +triton/language/core.py,sha256=a9A4B5uUBn1ijuqSZFMP7APU1wJ_-DTom51vvDGui9k,93793 +triton/language/extra/__init__.py,sha256=XRXFvr7416pRsh_Rh-X6qV66SiEyVDVbxp4GSAE1mfc,655 +triton/language/extra/cuda/__init__.py,sha256=JqiuryHnWRkfFztXgxbiQ62XA4dEKhsjhIHGobLuzcQ,414 +triton/language/extra/cuda/_experimental_tma.py,sha256=FwtsItBySF70RzS3qMKrlcdxznjFom6JD40QOs_RfNU,3555 +triton/language/extra/cuda/libdevice.py,sha256=crwXcdixYPuvzVOQ0e5styRAwQrUg0RRRlqek7QvXRw,56165 +triton/language/extra/cuda/utils.py,sha256=e1BslV7lZGhi2uVIlo5lI9dcN61HUMIU2asPaRjsyIo,4379 +triton/language/extra/hip/__init__.py,sha256=ieSER4LeX9_0horChGUUVwpuKAprkuka8uGAkEBDyDM,49 +triton/language/extra/hip/libdevice.py,sha256=EVraUfeXzQmN3F5Lleg2mohVcbFWOWlLaAH1nkbqtV4,16841 +triton/language/extra/libdevice.py,sha256=Dki14elRNmQsz-Ytw9CnOaLCCnte4T6cI8bOzWjN63A,6318 +triton/language/math.py,sha256=Lkr348qTen3UxyB-tu4_j368LzCRK1KnIE7qwEC9Kg8,7442 +triton/language/random.py,sha256=s664rmyx6UCFJUo8M2EhNHUsckROwhmWXdf6UuAQp2I,6864 +triton/language/semantic.py,sha256=yJhocGpO3_X4YSk9GTRQMalbhDt7eh9SOzDq02Djg34,79854 +triton/language/standard.py,sha256=NMo6NQOJt81Zxy9s-U9o4xmg5DlKhY04H7WTRKfMBS4,13747 
+triton/profiler/__init__.py,sha256=8MMGWMNsHxvgFva8l6o9lzUcAdGjpxiQouuTwJ4qkdQ,184 +triton/profiler/flags.py,sha256=BFBKQnozRN9Jp18_S5MuIeu5CJMW7_I38pM55qOg2oQ,604 +triton/profiler/hook.py,sha256=u5QsT4n4N94Xk2Cq23na7EuwUns1ZCNeShacUBgdwyo,1112 +triton/profiler/profile.py,sha256=iKoHJ_TxbaUUWgx6FvL9GyxnUFAE-fZKHVb7J4tk01o,6343 +triton/profiler/proton.py,sha256=ekp-_WPCn55Y0Xb4dLHVF-BOq07TLO5_BINILEeVPUA,2896 +triton/profiler/scope.py,sha256=9G87t4SpolCn0AzB8veK-xMM0x3hQ8ML7wdySbrJ0EQ,3169 +triton/profiler/viewer.py,sha256=oH8JtTFZ_VX1MCsZgT0FtzX8N5PPr76F_h9rrjyk1yM,13814 +triton/runtime/__init__.py,sha256=mKL5cqIBDUw2WO80NRCh4s1G8KYaqgM59TTAbTkPPjQ,621 +triton/runtime/autotuner.py,sha256=BJe69v9MSMSzdkvYSUDrvXrAFeLZ1x6A-7aUmpz2Le0,17271 +triton/runtime/build.py,sha256=KJgXirU54S8qTGsFpnG7DFuz5fK_kIUwReOhSgVEAnU,2830 +triton/runtime/cache.py,sha256=OQhUkwIW38-kayOL8P6SizWMAYSoVa_TbOYdTUHBkU0,10268 +triton/runtime/driver.py,sha256=VZ-883Xri71R72lHB6usIpLo3gGLbZJkAlLP3ewWSpc,1509 +triton/runtime/errors.py,sha256=oj73dn34qJbLhOjakakAuZPSv-laZyIYylJiJwREA8Y,787 +triton/runtime/interpreter.py,sha256=0SPiXDlM7X7DbCdu2JXoLmxJ8ugCwH_3NPoxuU0tJyg,53201 +triton/runtime/jit.py,sha256=8C8OgvZ0pRRL-8S2PbK9Knp6m6kbGE6O1immZpXVIzA,35303 +triton/testing.py,sha256=fX3pn9bjC3Z-z5qzSKW56C_2WF8h3mHLy5RJqpZ-HsA,19382 +triton/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +triton/tools/build_extern.py,sha256=jCr-2hu3nLGBIJhCGUQ1jAyzLttughjkiPGEwRFjLR0,13673 +triton/tools/compile.c,sha256=rjuAQ8b-2DTtbj29SgK1NxJI5BSU2P9ccp9wa5p8Iyc,2090 +triton/tools/compile.h,sha256=n9QKIFZTL4RSsiXtAxBP9XGSnxjyaevQQ9bBpwDsvAg,332 +triton/tools/compile.py,sha256=b3yNnVgoBk8WzOs87JrZPDIyasdSgAslOWmxse1J6yM,6761 +triton/tools/disasm.py,sha256=BBO4bALdLcWgWDLhQdYHLlTx3oo8g_d8maeE_Uu-FmU,5088 +triton/tools/experimental_descriptor.py,sha256=0Wqy96Cc6YLh9o0eTknW-Lfvha6lfRSfe8bswkcPHMs,1260 +triton/tools/link.py,sha256=u7qtfZRLriZkAMEGNvj8YF-k1cthmLL7BwHYqBgT63E,11871 diff --git a/lib/python3.10/site-packages/triton-3.2.0.dist-info/REQUESTED b/lib/python3.10/site-packages/triton-3.2.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/triton-3.2.0.dist-info/WHEEL b/lib/python3.10/site-packages/triton-3.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..91ee8fda184e552c2904a70107ce044d35988068 --- /dev/null +++ b/lib/python3.10/site-packages/triton-3.2.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.8.0) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64 +Tag: cp310-cp310-manylinux2014_x86_64 + diff --git a/lib/python3.10/site-packages/triton-3.2.0.dist-info/entry_points.txt b/lib/python3.10/site-packages/triton-3.2.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..fec7e033ca5aee50e0b944b9c14f2987c668d505 --- /dev/null +++ b/lib/python3.10/site-packages/triton-3.2.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +proton = triton.profiler.proton:main +proton-viewer = triton.profiler.viewer:main diff --git a/lib/python3.10/site-packages/triton-3.2.0.dist-info/top_level.txt b/lib/python3.10/site-packages/triton-3.2.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..9bcbb7a6a4fa823eb1d1828c32e080e3214caddf --- /dev/null +++ b/lib/python3.10/site-packages/triton-3.2.0.dist-info/top_level.txt @@ -0,0 +1,13 @@ 
+triton +triton/_C +triton/backends +triton/backends/amd +triton/backends/nvidia +triton/compiler +triton/language +triton/language/extra +triton/language/extra/cuda +triton/language/extra/hip +triton/profiler +triton/runtime +triton/tools
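
Note (not part of the diff above): each RECORD entry pairs an installed path with a sha256 digest encoded as urlsafe base64 without padding, plus the file size in bytes, which is the standard wheel RECORD format. The sketch below is an illustrative helper only, assuming the conventional RECORD layout; the function name and the paths in the usage comment are hypothetical and do not come from this package.

    import base64
    import csv
    import hashlib
    from pathlib import Path

    def verify_record(site_packages: str, record_path: str) -> list[str]:
        """Recompute each file's digest and compare it with its RECORD entry.

        Returns a list of human-readable mismatch descriptions (empty if all match).
        """
        mismatches = []
        root = Path(site_packages)
        with open(record_path, newline="") as fh:
            for name, digest, _size in csv.reader(fh):
                if not digest:
                    # RECORD itself (and signature files) are listed without a hash.
                    continue
                algo, _, expected = digest.partition("=")
                data = (root / name).read_bytes()
                # Wheel RECORD hashes are urlsafe base64 with the '=' padding stripped.
                actual = (
                    base64.urlsafe_b64encode(hashlib.new(algo, data).digest())
                    .rstrip(b"=")
                    .decode("ascii")
                )
                if actual != expected:
                    mismatches.append(f"{name}: expected {expected}, got {actual}")
        return mismatches

    # Hypothetical usage against the tree shown in this diff:
    # print(verify_record("lib/python3.10/site-packages",
    #                     "lib/python3.10/site-packages/triton-3.2.0.dist-info/RECORD"))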