nnilayy committed
Commit 011f64b · verified · 1 Parent(s): 8f2b60a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. lib/python3.10/site-packages/av/__init__.py +69 -0
  2. lib/python3.10/site-packages/av/_core.pyx +65 -0
  3. lib/python3.10/site-packages/av/bitstream.pyx +95 -0
  4. lib/python3.10/site-packages/av/buffer.pxd +6 -0
  5. lib/python3.10/site-packages/av/buffer.pyi +9 -0
  6. lib/python3.10/site-packages/av/bytesource.pxd +14 -0
  7. lib/python3.10/site-packages/av/bytesource.pyx +43 -0
  8. lib/python3.10/site-packages/av/datasets.py +123 -0
  9. lib/python3.10/site-packages/av/descriptor.pxd +20 -0
  10. lib/python3.10/site-packages/av/descriptor.pyi +7 -0
  11. lib/python3.10/site-packages/av/dictionary.pyi +10 -0
  12. lib/python3.10/site-packages/av/error.pxd +3 -0
  13. lib/python3.10/site-packages/av/error.pyi +72 -0
  14. lib/python3.10/site-packages/av/error.pyx +430 -0
  15. lib/python3.10/site-packages/av/format.pyx +170 -0
  16. lib/python3.10/site-packages/av/frame.pxd +14 -0
  17. lib/python3.10/site-packages/av/logging.pxd +2 -0
  18. lib/python3.10/site-packages/av/logging.pyi +33 -0
  19. lib/python3.10/site-packages/av/opaque.pxd +12 -0
  20. lib/python3.10/site-packages/av/option.pxd +21 -0
  21. lib/python3.10/site-packages/av/option.pyi +55 -0
  22. lib/python3.10/site-packages/av/option.pyx +172 -0
  23. lib/python3.10/site-packages/av/packet.pxd +21 -0
  24. lib/python3.10/site-packages/av/packet.pyi +25 -0
  25. lib/python3.10/site-packages/av/plane.pyx +20 -0
  26. lib/python3.10/site-packages/av/py.typed +0 -0
  27. lib/python3.10/site-packages/av/stream.pxd +26 -0
  28. lib/python3.10/site-packages/av/stream.pyi +48 -0
  29. lib/python3.10/site-packages/av/stream.pyx +269 -0
  30. lib/python3.10/site-packages/av/utils.pyx +78 -0
  31. lib/python3.10/site-packages/multiprocess/__init__.py +39 -0
  32. lib/python3.10/site-packages/multiprocess/connection.py +981 -0
  33. lib/python3.10/site-packages/multiprocess/context.py +362 -0
  34. lib/python3.10/site-packages/multiprocess/dummy/__init__.py +126 -0
  35. lib/python3.10/site-packages/multiprocess/dummy/connection.py +75 -0
  36. lib/python3.10/site-packages/multiprocess/forkserver.py +347 -0
  37. lib/python3.10/site-packages/multiprocess/heap.py +337 -0
  38. lib/python3.10/site-packages/multiprocess/managers.py +1369 -0
  39. lib/python3.10/site-packages/multiprocess/pool.py +954 -0
  40. lib/python3.10/site-packages/multiprocess/popen_forkserver.py +74 -0
  41. lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py +72 -0
  42. lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py +131 -0
  43. lib/python3.10/site-packages/multiprocess/process.py +432 -0
  44. lib/python3.10/site-packages/multiprocess/queues.py +383 -0
  45. lib/python3.10/site-packages/multiprocess/resource_sharer.py +154 -0
  46. lib/python3.10/site-packages/multiprocess/resource_tracker.py +234 -0
  47. lib/python3.10/site-packages/multiprocess/shared_memory.py +535 -0
  48. lib/python3.10/site-packages/multiprocess/spawn.py +297 -0
  49. lib/python3.10/site-packages/multiprocess/tests/__init__.py +0 -0
  50. lib/python3.10/site-packages/multiprocess/tests/__main__.py +33 -0
lib/python3.10/site-packages/av/__init__.py ADDED
@@ -0,0 +1,69 @@
# MUST import the core before anything else in order to initialize the underlying
# library that is being wrapped.
from av._core import time_base, library_versions, ffmpeg_version_info

# Capture logging (by importing it).
from av import logging

# For convenience, import all common attributes.
from av.about import __version__
from av.audio.codeccontext import AudioCodecContext
from av.audio.fifo import AudioFifo
from av.audio.format import AudioFormat
from av.audio.frame import AudioFrame
from av.audio.layout import AudioLayout
from av.audio.resampler import AudioResampler
from av.audio.stream import AudioStream
from av.bitstream import BitStreamFilterContext, bitstream_filters_available
from av.codec.codec import Codec, codecs_available
from av.codec.context import CodecContext
from av.codec.hwaccel import HWConfig
from av.container import open
from av.format import ContainerFormat, formats_available
from av.packet import Packet
from av.error import *  # noqa: F403; This is limited to exception types.
from av.video.codeccontext import VideoCodecContext
from av.video.format import VideoFormat
from av.video.frame import VideoFrame
from av.video.stream import VideoStream

__all__ = (
    "__version__",
    "time_base",
    "ffmpeg_version_info",
    "library_versions",
    "AudioCodecContext",
    "AudioFifo",
    "AudioFormat",
    "AudioFrame",
    "AudioLayout",
    "AudioResampler",
    "AudioStream",
    "BitStreamFilterContext",
    "bitstream_filters_available",
    "Codec",
    "codecs_available",
    "CodecContext",
    "open",
    "ContainerFormat",
    "formats_available",
    "Packet",
    "VideoCodecContext",
    "VideoFormat",
    "VideoFrame",
    "VideoStream",
)


def get_include() -> str:
    """
    Returns the path to the `include` folder to be used when building extensions to av.
    """
    import os

    # Installed package
    include_path = os.path.join(os.path.dirname(__file__), "include")
    if os.path.exists(include_path):
        return include_path
    # Running from source directory
    return os.path.join(os.path.dirname(__file__), os.pardir, "include")
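Note: `get_include()` follows the same pattern as `numpy.get_include()`; a build script passes the directory to the compiler when building extensions against PyAV's headers. A minimal sketch (this `setup.py` is illustrative, not part of this commit):

    # Hypothetical setup.py for an extension built against PyAV's bundled headers.
    from setuptools import Extension, setup
    import av

    setup(
        name="my-av-extension",                   # hypothetical project name
        ext_modules=[
            Extension(
                "my_ext",
                sources=["my_ext.c"],             # hypothetical extension source
                include_dirs=[av.get_include()],  # headers resolved by the function above
            )
        ],
    )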
lib/python3.10/site-packages/av/_core.pyx ADDED
@@ -0,0 +1,65 @@
cimport libav as lib

# Initialise libraries.
lib.avformat_network_init()
lib.avdevice_register_all()

# Exports.
time_base = lib.AV_TIME_BASE


cdef decode_version(v):
    if v < 0:
        return (-1, -1, -1)

    cdef int major = (v >> 16) & 0xff
    cdef int minor = (v >> 8) & 0xff
    cdef int micro = (v) & 0xff

    return (major, minor, micro)

# Return an informative version string.
# This usually is the actual release version number or a git commit
# description. This string has no fixed format and can change any time. It
# should never be parsed by code.
ffmpeg_version_info = lib.av_version_info()

library_meta = {
    "libavutil": dict(
        version=decode_version(lib.avutil_version()),
        configuration=lib.avutil_configuration(),
        license=lib.avutil_license()
    ),
    "libavcodec": dict(
        version=decode_version(lib.avcodec_version()),
        configuration=lib.avcodec_configuration(),
        license=lib.avcodec_license()
    ),
    "libavformat": dict(
        version=decode_version(lib.avformat_version()),
        configuration=lib.avformat_configuration(),
        license=lib.avformat_license()
    ),
    "libavdevice": dict(
        version=decode_version(lib.avdevice_version()),
        configuration=lib.avdevice_configuration(),
        license=lib.avdevice_license()
    ),
    "libavfilter": dict(
        version=decode_version(lib.avfilter_version()),
        configuration=lib.avfilter_configuration(),
        license=lib.avfilter_license()
    ),
    "libswscale": dict(
        version=decode_version(lib.swscale_version()),
        configuration=lib.swscale_configuration(),
        license=lib.swscale_license()
    ),
    "libswresample": dict(
        version=decode_version(lib.swresample_version()),
        configuration=lib.swresample_configuration(),
        license=lib.swresample_license()
    ),
}

library_versions = {name: meta["version"] for name, meta in library_meta.items()}
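The `decode_version()` helper above unpacks FFmpeg's packed version layout (major << 16 | minor << 8 | micro). A pure-Python sketch of the same unpacking, for illustration only:

    # Pure-Python mirror of decode_version(); the sample value is illustrative.
    def decode_version(v: int) -> tuple[int, int, int]:
        if v < 0:
            return (-1, -1, -1)  # the wrapped library reported no version
        return ((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF)

    # e.g. version 59.39.100 packs to (59 << 16) | (39 << 8) | 100
    assert decode_version((59 << 16) | (39 << 8) | 100) == (59, 39, 100)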
lib/python3.10/site-packages/av/bitstream.pyx ADDED
@@ -0,0 +1,95 @@
cimport libav as lib
from libc.errno cimport EAGAIN

from av.error cimport err_check
from av.packet cimport Packet
from av.stream cimport Stream


cdef class BitStreamFilterContext:
    """
    Initializes a bitstream filter: a way to directly modify packet data.

    Wraps :ffmpeg:`AVBSFContext`

    :param Stream in_stream: A stream that defines the input codec for the bitstream filter.
    :param Stream out_stream: A stream whose codec is overwritten using the output parameters from the bitstream filter.
    """
    def __cinit__(self, filter_description, Stream in_stream=None, Stream out_stream=None):
        cdef int res
        cdef char *filter_str = filter_description

        with nogil:
            res = lib.av_bsf_list_parse_str(filter_str, &self.ptr)
        err_check(res)

        if in_stream is not None:
            with nogil:
                res = lib.avcodec_parameters_copy(self.ptr.par_in, in_stream.ptr.codecpar)
            err_check(res)

        with nogil:
            res = lib.av_bsf_init(self.ptr)
        err_check(res)

        if out_stream is not None:
            with nogil:
                res = lib.avcodec_parameters_copy(out_stream.ptr.codecpar, self.ptr.par_out)
            err_check(res)
            lib.avcodec_parameters_to_context(out_stream.codec_context.ptr, out_stream.ptr.codecpar)

    def __dealloc__(self):
        if self.ptr:
            lib.av_bsf_free(&self.ptr)

    cpdef filter(self, Packet packet=None):
        """
        Processes a packet based on the filter_description set during initialization.
        Multiple packets may be created.

        :rtype: list[Packet]
        """
        cdef int res
        cdef Packet new_packet

        with nogil:
            res = lib.av_bsf_send_packet(self.ptr, packet.ptr if packet is not None else NULL)
        err_check(res)

        output = []
        while True:
            new_packet = Packet()
            with nogil:
                res = lib.av_bsf_receive_packet(self.ptr, new_packet.ptr)

            if res == -EAGAIN or res == lib.AVERROR_EOF:
                return output

            err_check(res)
            if res:
                return output

            output.append(new_packet)

    cpdef flush(self):
        """
        Reset the internal state of the filter.
        Should be called e.g. when seeking.
        Can be used to make the filter usable again after draining it with an EOF marker packet.
        """
        lib.av_bsf_flush(self.ptr)


cdef get_filter_names():
    names = set()
    cdef const lib.AVBitStreamFilter *ptr
    cdef void *opaque = NULL
    while True:
        ptr = lib.av_bsf_iterate(&opaque)
        if ptr:
            names.add(ptr.name)
        else:
            break

    return names

bitstream_filters_available = get_filter_names()
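As a usage sketch (the file name is illustrative; `h264_mp4toannexb` is a standard FFmpeg bitstream filter):

    import av
    from av.bitstream import BitStreamFilterContext

    with av.open("input.mp4") as container:  # hypothetical input file
        stream = container.streams.video[0]
        bsf = BitStreamFilterContext("h264_mp4toannexb", in_stream=stream)
        for packet in container.demux(stream):
            for filtered in bsf.filter(packet):
                ...  # filtered packets now carry Annex B start codes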
lib/python3.10/site-packages/av/buffer.pxd ADDED
@@ -0,0 +1,6 @@
cdef class Buffer:

    cdef size_t _buffer_size(self)
    cdef void* _buffer_ptr(self)
    cdef bint _buffer_writable(self)
lib/python3.10/site-packages/av/buffer.pyi ADDED
@@ -0,0 +1,9 @@
# When Python 3.12 becomes our lowest supported version, we could make this
# class inherit `collections.abc.Buffer`.

class Buffer:
    buffer_size: int
    buffer_ptr: int
    def update(self, input: bytes) -> None: ...
    def __buffer__(self, flags: int) -> memoryview: ...
    def __bytes__(self) -> bytes: ...
lib/python3.10/site-packages/av/bytesource.pxd ADDED
@@ -0,0 +1,14 @@
from cpython.buffer cimport Py_buffer


cdef class ByteSource:

    cdef object owner

    cdef bint has_view
    cdef Py_buffer view

    cdef unsigned char *ptr
    cdef size_t length

cdef ByteSource bytesource(object, bint allow_none=*)
lib/python3.10/site-packages/av/bytesource.pyx ADDED
@@ -0,0 +1,43 @@
from cpython.buffer cimport (
    PyBUF_SIMPLE,
    PyBuffer_Release,
    PyObject_CheckBuffer,
    PyObject_GetBuffer,
)


cdef class ByteSource:
    def __cinit__(self, owner):
        self.owner = owner

        try:
            self.ptr = owner
        except TypeError:
            pass
        else:
            self.length = len(owner)
            return

        if PyObject_CheckBuffer(owner):
            # Can very likely use PyBUF_ND instead of PyBUF_SIMPLE
            res = PyObject_GetBuffer(owner, &self.view, PyBUF_SIMPLE)
            if not res:
                self.has_view = True
                self.ptr = <unsigned char *>self.view.buf
                self.length = self.view.len
                return

        raise TypeError("expected bytes, bytearray or memoryview")

    def __dealloc__(self):
        if self.has_view:
            PyBuffer_Release(&self.view)


cdef ByteSource bytesource(obj, bint allow_none=False):
    if allow_none and obj is None:
        return
    elif isinstance(obj, ByteSource):
        return obj
    else:
        return ByteSource(obj)
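`bytesource()` is the internal funnel through which PyAV accepts bytes-like input; since it is declared `cdef`, it is callable only from Cython. A sketch of the coercion rules it implements:

    # Cython-side sketch; not reachable from plain Python code.
    from av.bytesource cimport ByteSource, bytesource

    cdef ByteSource src
    src = bytesource(b"raw bytes")           # bytes coerce straight to the char pointer
    src = bytesource(bytearray(16))          # anything with the buffer protocol works
    src = bytesource(None, allow_none=True)  # None passes through when explicitly allowed
    # bytesource(42) raises TypeError("expected bytes, bytearray or memoryview")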
lib/python3.10/site-packages/av/datasets.py ADDED
@@ -0,0 +1,123 @@
import errno
import logging
import os
import sys
from typing import Iterator
from urllib.request import urlopen

log = logging.getLogger(__name__)


def iter_data_dirs(check_writable: bool = False) -> Iterator[str]:
    try:
        yield os.environ["PYAV_TESTDATA_DIR"]
    except KeyError:
        pass

    if os.name == "nt":
        yield os.path.join(sys.prefix, "pyav", "datasets")
        return

    bases = [
        "/usr/local/share",
        "/usr/local/lib",
        "/usr/share",
        "/usr/lib",
    ]

    # Prefer the local virtualenv.
    if hasattr(sys, "real_prefix"):
        bases.insert(0, sys.prefix)

    for base in bases:
        dir_ = os.path.join(base, "pyav", "datasets")
        if check_writable:
            if os.path.exists(dir_):
                if not os.access(dir_, os.W_OK):
                    continue
            else:
                if not os.access(base, os.W_OK):
                    continue
        yield dir_

    yield os.path.join(os.path.expanduser("~"), ".pyav", "datasets")


def cached_download(url: str, name: str) -> str:
    """Download the data at a URL, and cache it under the given name.

    The file is stored under `pyav/datasets` with the given name in the directory
    :envvar:`PYAV_TESTDATA_DIR`, or the first that is writeable of:

    - the current virtualenv
    - ``/usr/local/share``
    - ``/usr/local/lib``
    - ``/usr/share``
    - ``/usr/lib``
    - the user's home

    """
    clean_name = os.path.normpath(name)
    if clean_name != name:
        raise ValueError(f"{name} is not normalized.")

    for dir_ in iter_data_dirs():
        path = os.path.join(dir_, name)
        if os.path.exists(path):
            return path

    dir_ = next(iter_data_dirs(True))
    path = os.path.join(dir_, name)

    log.info(f"Downloading {url} to {path}")

    response = urlopen(url)
    if response.getcode() != 200:
        raise ValueError(f"HTTP {response.getcode()}")

    dir_ = os.path.dirname(path)
    try:
        os.makedirs(dir_)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    tmp_path = path + ".tmp"
    with open(tmp_path, "wb") as fh:
        while True:
            chunk = response.read(8196)
            if chunk:
                fh.write(chunk)
            else:
                break

    os.rename(tmp_path, path)

    return path


def fate(name: str) -> str:
    """Download and return a path to a sample from the FFmpeg test suite.

    Data is handled by :func:`cached_download`.

    See the `FFmpeg Automated Test Environment <https://www.ffmpeg.org/fate.html>`_

    """
    return cached_download(
        "http://fate.ffmpeg.org/fate-suite/" + name,
        os.path.join("fate-suite", name.replace("/", os.path.sep)),
    )


def curated(name: str) -> str:
    """Download and return a path to a sample that is curated by the PyAV developers.

    Data is handled by :func:`cached_download`.

    """
    return cached_download(
        "https://pyav.org/datasets/" + name,
        os.path.join("pyav-curated", name.replace("/", os.path.sep)),
    )
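Typical use looks like the sketch below (the sample name is illustrative; any relative path in the FATE suite works):

    import av
    from av.datasets import fate

    # The first call downloads and caches the sample; later calls hit the cache.
    path = fate("h264/interlaced_crop.mp4")
    with av.open(path) as container:
        print(container.streams.video[0].codec_context.name)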
lib/python3.10/site-packages/av/descriptor.pxd ADDED
@@ -0,0 +1,20 @@
cimport libav as lib


cdef class Descriptor:

    # These are present as:
    # - AVCodecContext.av_class (same as avcodec_get_class())
    # - AVFormatContext.av_class (same as avformat_get_class())
    # - AVFilterContext.av_class (same as avfilter_get_class())
    # - AVCodec.priv_class
    # - AVOutputFormat.priv_class
    # - AVInputFormat.priv_class
    # - AVFilter.priv_class

    cdef const lib.AVClass *ptr

    cdef object _options  # Option list cache.


cdef Descriptor wrap_avclass(const lib.AVClass*)
lib/python3.10/site-packages/av/descriptor.pyi ADDED
@@ -0,0 +1,7 @@
from typing import NoReturn

from .option import Option

class Descriptor:
    name: str
    options: tuple[Option, ...]
lib/python3.10/site-packages/av/dictionary.pyi ADDED
@@ -0,0 +1,10 @@
from collections.abc import MutableMapping
from typing import Iterator

class Dictionary(MutableMapping[str, str]):
    def __getitem__(self, key: str) -> str: ...
    def __setitem__(self, key: str, value: str) -> None: ...
    def __delitem__(self, key: str) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[str]: ...
    def __repr__(self) -> str: ...
lib/python3.10/site-packages/av/error.pxd ADDED
@@ -0,0 +1,3 @@
cdef int stash_exception(exc_info=*)
cpdef int err_check(int res, filename=*) except -1
lib/python3.10/site-packages/av/error.pyi ADDED
@@ -0,0 +1,72 @@
import builtins
from enum import Enum

classes: dict[int, Exception]

def code_to_tag(code: int) -> bytes: ...
def tag_to_code(tag: bytes) -> int: ...
def err_check(res: int, filename: str | None = None) -> int: ...

class FFmpegError(Exception):
    errno: int | None
    strerror: str | None
    filename: str
    log: tuple[int, tuple[int, str, str] | None]

    def __init__(
        self,
        code: int,
        message: str,
        filename: str | None = None,
        log: tuple[int, tuple[int, str, str] | None] | None = None,
    ) -> None: ...

class LookupError(FFmpegError): ...
class HTTPError(FFmpegError): ...
class HTTPClientError(FFmpegError): ...
class UndefinedError(FFmpegError): ...
class InvalidDataError(FFmpegError, builtins.ValueError): ...
class BugError(FFmpegError, builtins.RuntimeError): ...
class BufferTooSmallError(FFmpegError, builtins.ValueError): ...
class BSFNotFoundError(LookupError): ...
class DecoderNotFoundError(LookupError): ...
class DemuxerNotFoundError(LookupError): ...
class EncoderNotFoundError(LookupError): ...
class ExitError(FFmpegError): ...
class ExternalError(FFmpegError): ...
class FilterNotFoundError(LookupError): ...
class MuxerNotFoundError(LookupError): ...
class OptionNotFoundError(LookupError): ...
class PatchWelcomeError(FFmpegError): ...
class ProtocolNotFoundError(LookupError): ...
class UnknownError(FFmpegError): ...
class ExperimentalError(FFmpegError): ...
class InputChangedError(FFmpegError): ...
class OutputChangedError(FFmpegError): ...
class HTTPBadRequestError(HTTPClientError): ...
class HTTPUnauthorizedError(HTTPClientError): ...
class HTTPForbiddenError(HTTPClientError): ...
class HTTPNotFoundError(HTTPClientError): ...
class HTTPOtherClientError(HTTPClientError): ...
class HTTPServerError(HTTPError): ...
class PyAVCallbackError(FFmpegError, builtins.RuntimeError): ...
class BrokenPipeError(FFmpegError, builtins.BrokenPipeError): ...
class ChildProcessError(FFmpegError, builtins.ChildProcessError): ...
class ConnectionAbortedError(FFmpegError, builtins.ConnectionAbortedError): ...
class ConnectionRefusedError(FFmpegError, builtins.ConnectionRefusedError): ...
class ConnectionResetError(FFmpegError, builtins.ConnectionResetError): ...
class BlockingIOError(FFmpegError, builtins.BlockingIOError): ...
class EOFError(FFmpegError, builtins.EOFError): ...
class FileExistsError(FFmpegError, builtins.FileExistsError): ...
class FileNotFoundError(FFmpegError, builtins.FileNotFoundError): ...
class InterruptedError(FFmpegError, builtins.InterruptedError): ...
class IsADirectoryError(FFmpegError, builtins.IsADirectoryError): ...
class MemoryError(FFmpegError, builtins.MemoryError): ...
class NotADirectoryError(FFmpegError, builtins.NotADirectoryError): ...
class NotImplementedError(FFmpegError, builtins.NotImplementedError): ...
class OverflowError(FFmpegError, builtins.OverflowError): ...
class OSError(FFmpegError, builtins.OSError): ...
class PermissionError(FFmpegError, builtins.PermissionError): ...
class ProcessLookupError(FFmpegError, builtins.ProcessLookupError): ...
class TimeoutError(FFmpegError, builtins.TimeoutError): ...
class ValueError(FFmpegError, builtins.ValueError): ...
lib/python3.10/site-packages/av/error.pyx ADDED
@@ -0,0 +1,430 @@
cimport libav as lib
from libc.stdlib cimport free, malloc

from av.logging cimport get_last_error

import errno
import os
import sys
import traceback
from threading import local

# Will get extended with all of the exceptions.
__all__ = [
    "ErrorType", "FFmpegError", "LookupError", "HTTPError", "HTTPClientError",
    "UndefinedError",
]


cpdef code_to_tag(int code):
    """Convert an integer error code into 4-byte tag.

    >>> code_to_tag(1953719668)
    b'test'

    """
    return bytes((
        code & 0xff,
        (code >> 8) & 0xff,
        (code >> 16) & 0xff,
        (code >> 24) & 0xff,
    ))

cpdef tag_to_code(bytes tag):
    """Convert a 4-byte error tag into an integer code.

    >>> tag_to_code(b'test')
    1953719668

    """
    if len(tag) != 4:
        raise ValueError("Error tags are 4 bytes.")
    return (
        (tag[0]) +
        (tag[1] << 8) +
        (tag[2] << 16) +
        (tag[3] << 24)
    )


class FFmpegError(Exception):
    """Exception class for errors from within FFmpeg.

    .. attribute:: errno

        FFmpeg's integer error code.

    .. attribute:: strerror

        FFmpeg's error message.

    .. attribute:: filename

        The filename that was being operated on (if available).

    .. attribute:: log

        The tuple from :func:`av.logging.get_last_log`, or ``None``.

    """

    def __init__(self, code, message, filename=None, log=None):
        self.errno = code
        self.strerror = message

        args = [code, message]
        if filename or log:
            args.append(filename)
            if log:
                args.append(log)
        super(FFmpegError, self).__init__(*args)
        self.args = tuple(args)  # FileNotFoundError/etc. only pulls 2 args.

    @property
    def filename(self):
        try:
            return self.args[2]
        except IndexError:
            pass

    @property
    def log(self):
        try:
            return self.args[3]
        except IndexError:
            pass

    def __str__(self):
        msg = ""
        if self.errno is not None:
            msg = f"{msg}[Errno {self.errno}] "
        if self.strerror is not None:
            msg = f"{msg}{self.strerror}"
        if self.filename:
            msg = f"{msg}: {self.filename!r}"
        if self.log:
            msg = f"{msg}; last error log: [{self.log[1].strip()}] {self.log[2].strip()}"

        return msg


# Our custom error, used in callbacks.
cdef int c_PYAV_STASHED_ERROR = tag_to_code(b"PyAV")
cdef str PYAV_STASHED_ERROR_message = "Error in PyAV callback"


# Bases for the FFmpeg-based exceptions.
class LookupError(FFmpegError, LookupError):
    pass


class HTTPError(FFmpegError):
    pass


class HTTPClientError(FFmpegError):
    pass


# Tuples of (enum_name, enum_value, exc_name, exc_base).
_ffmpeg_specs = (
    ("BSF_NOT_FOUND", -lib.AVERROR_BSF_NOT_FOUND, "BSFNotFoundError", LookupError),
    ("BUG", -lib.AVERROR_BUG, None, RuntimeError),
    ("BUFFER_TOO_SMALL", -lib.AVERROR_BUFFER_TOO_SMALL, None, ValueError),
    ("DECODER_NOT_FOUND", -lib.AVERROR_DECODER_NOT_FOUND, None, LookupError),
    ("DEMUXER_NOT_FOUND", -lib.AVERROR_DEMUXER_NOT_FOUND, None, LookupError),
    ("ENCODER_NOT_FOUND", -lib.AVERROR_ENCODER_NOT_FOUND, None, LookupError),
    ("EOF", -lib.AVERROR_EOF, "EOFError", EOFError),
    ("EXIT", -lib.AVERROR_EXIT, None, None),
    ("EXTERNAL", -lib.AVERROR_EXTERNAL, None, None),
    ("FILTER_NOT_FOUND", -lib.AVERROR_FILTER_NOT_FOUND, None, LookupError),
    ("INVALIDDATA", -lib.AVERROR_INVALIDDATA, "InvalidDataError", ValueError),
    ("MUXER_NOT_FOUND", -lib.AVERROR_MUXER_NOT_FOUND, None, LookupError),
    ("OPTION_NOT_FOUND", -lib.AVERROR_OPTION_NOT_FOUND, None, LookupError),
    ("PATCHWELCOME", -lib.AVERROR_PATCHWELCOME, "PatchWelcomeError", None),
    ("PROTOCOL_NOT_FOUND", -lib.AVERROR_PROTOCOL_NOT_FOUND, None, LookupError),
    ("UNKNOWN", -lib.AVERROR_UNKNOWN, None, None),
    ("EXPERIMENTAL", -lib.AVERROR_EXPERIMENTAL, None, None),
    ("INPUT_CHANGED", -lib.AVERROR_INPUT_CHANGED, None, None),
    ("OUTPUT_CHANGED", -lib.AVERROR_OUTPUT_CHANGED, None, None),
    ("HTTP_BAD_REQUEST", -lib.AVERROR_HTTP_BAD_REQUEST, "HTTPBadRequestError", HTTPClientError),
    ("HTTP_UNAUTHORIZED", -lib.AVERROR_HTTP_UNAUTHORIZED, "HTTPUnauthorizedError", HTTPClientError),
    ("HTTP_FORBIDDEN", -lib.AVERROR_HTTP_FORBIDDEN, "HTTPForbiddenError", HTTPClientError),
    ("HTTP_NOT_FOUND", -lib.AVERROR_HTTP_NOT_FOUND, "HTTPNotFoundError", HTTPClientError),
    ("HTTP_OTHER_4XX", -lib.AVERROR_HTTP_OTHER_4XX, "HTTPOtherClientError", HTTPClientError),
    ("HTTP_SERVER_ERROR", -lib.AVERROR_HTTP_SERVER_ERROR, "HTTPServerError", HTTPError),
    ("PYAV_CALLBACK", c_PYAV_STASHED_ERROR, "PyAVCallbackError", RuntimeError),
)

cdef sentinel = object()


class EnumType(type):
    def __new__(mcl, name, bases, attrs, *args):
        # Just adapting the method signature.
        return super().__new__(mcl, name, bases, attrs)

    def __init__(self, name, bases, attrs, items):
        self._by_name = {}
        self._by_value = {}
        self._all = []

        for spec in items:
            self._create(*spec)

    def _create(self, name, value, doc=None, by_value_only=False):
        # We only have one instance per value.
        try:
            item = self._by_value[value]
        except KeyError:
            item = self(sentinel, name, value, doc)
            self._by_value[value] = item

        return item

    def __len__(self):
        return len(self._all)

    def __iter__(self):
        return iter(self._all)

    def __getitem__(self, key):
        if isinstance(key, str):
            return self._by_name[key]

        if isinstance(key, int):
            try:
                return self._by_value[key]
            except KeyError:
                pass
            raise KeyError(key)

        if isinstance(key, self):
            return key

        raise TypeError(f"{self.__name__} indices must be str, int, or itself")

    def _get(self, long value, bint create=False):
        try:
            return self._by_value[value]
        except KeyError:
            pass

        if not create:
            return

        return self._create(f"{self.__name__.upper()}_{value}", value, by_value_only=True)

    def get(self, key, default=None, create=False):
        try:
            return self[key]
        except KeyError:
            if create:
                return self._get(key, create=True)
            return default


cdef class EnumItem:
    """An enumeration of FFmpeg's error types.

    .. attribute:: tag

        The FFmpeg byte tag for the error.

    .. attribute:: strerror

        The error message that would be returned.

    """
    cdef readonly str name
    cdef readonly int value

    def __cinit__(self, sentinel_, str name, int value, doc=None):
        if sentinel_ is not sentinel:
            raise RuntimeError(f"Cannot instantiate {self.__class__.__name__}.")

        self.name = name
        self.value = value
        self.__doc__ = doc

    def __repr__(self):
        return f"<{self.__class__.__module__}.{self.__class__.__name__}:{self.name}(0x{self.value:x})>"

    def __str__(self):
        return self.name

    def __int__(self):
        return self.value

    @property
    def tag(self):
        return code_to_tag(self.value)


ErrorType = EnumType("ErrorType", (EnumItem, ), {"__module__": __name__}, [x[:2] for x in _ffmpeg_specs])


for enum in ErrorType:
    # Mimic the errno module.
    globals()[enum.name] = enum
    if enum.value == c_PYAV_STASHED_ERROR:
        enum.strerror = PYAV_STASHED_ERROR_message
    else:
        enum.strerror = lib.av_err2str(-enum.value)


# Mimic the builtin exception types.
# See https://www.python.org/dev/peps/pep-3151/#new-exception-classes
# Use the named ones we have, otherwise default to OSError for anything in errno.

# See this command for the count of POSIX codes used:
#
#   egrep -IR 'AVERROR\(E[A-Z]+\)' vendor/ffmpeg-4.2 |\
#       sed -E 's/.*AVERROR\((E[A-Z]+)\).*/\1/' | \
#       sort | uniq -c
#
# The biggest ones that don't map to PEP 3151 builtins:
#
#   2106 EINVAL -> ValueError
#    649 EIO    -> IOError (if it is distinct from OSError)
#   4080 ENOMEM -> MemoryError
#    340 ENOSYS -> NotImplementedError
#     35 ERANGE -> OverflowError

classes = {}


def _extend_builtin(name, codes):
    base = getattr(__builtins__, name, OSError)
    cls = type(name, (FFmpegError, base), dict(__module__=__name__))

    # Register in builder.
    for code in codes:
        classes[code] = cls

    # Register in module.
    globals()[name] = cls
    __all__.append(name)

    return cls


# PEP 3151 builtins.
_extend_builtin("PermissionError", (errno.EACCES, errno.EPERM))
_extend_builtin("BlockingIOError", (errno.EAGAIN, errno.EALREADY, errno.EINPROGRESS, errno.EWOULDBLOCK))
_extend_builtin("ChildProcessError", (errno.ECHILD, ))
_extend_builtin("ConnectionAbortedError", (errno.ECONNABORTED, ))
_extend_builtin("ConnectionRefusedError", (errno.ECONNREFUSED, ))
_extend_builtin("ConnectionResetError", (errno.ECONNRESET, ))
_extend_builtin("FileExistsError", (errno.EEXIST, ))
_extend_builtin("InterruptedError", (errno.EINTR, ))
_extend_builtin("IsADirectoryError", (errno.EISDIR, ))
_extend_builtin("FileNotFoundError", (errno.ENOENT, ))
_extend_builtin("NotADirectoryError", (errno.ENOTDIR, ))
_extend_builtin("BrokenPipeError", (errno.EPIPE, errno.ESHUTDOWN))
_extend_builtin("ProcessLookupError", (errno.ESRCH, ))
_extend_builtin("TimeoutError", (errno.ETIMEDOUT, ))

# Other obvious ones.
_extend_builtin("ValueError", (errno.EINVAL, ))
_extend_builtin("MemoryError", (errno.ENOMEM, ))
_extend_builtin("NotImplementedError", (errno.ENOSYS, ))
_extend_builtin("OverflowError", (errno.ERANGE, ))

# The rest of them (for now)
_extend_builtin("OSError", [code for code in errno.errorcode if code not in classes])

# Classes for the FFmpeg errors.
for enum_name, code, name, base in _ffmpeg_specs:
    name = name or enum_name.title().replace("_", "") + "Error"

    if base is None:
        bases = (FFmpegError,)
    elif issubclass(base, FFmpegError):
        bases = (base,)
    else:
        bases = (FFmpegError, base)

    cls = type(name, bases, {"__module__": __name__})

    # Register in builder.
    classes[code] = cls

    # Register in module.
    globals()[name] = cls
    __all__.append(name)

del _ffmpeg_specs


# Storage for stashing.
cdef object _local = local()
cdef int _err_count = 0

cdef int stash_exception(exc_info=None):
    global _err_count

    existing = getattr(_local, "exc_info", None)
    if existing is not None:
        print("PyAV library exception being dropped:", file=sys.stderr)
        traceback.print_exception(*existing)
        _err_count -= 1  # Balance out the +=1 that is coming.

    exc_info = exc_info or sys.exc_info()
    _local.exc_info = exc_info
    if exc_info:
        _err_count += 1

    return -c_PYAV_STASHED_ERROR


cdef int _last_log_count = 0

cpdef int err_check(int res, filename=None) except -1:
    """Raise appropriate exceptions from library return code."""

    global _err_count
    global _last_log_count

    # Check for stashed exceptions.
    if _err_count:
        exc_info = getattr(_local, "exc_info", None)
        if exc_info is not None:
            _err_count -= 1
            _local.exc_info = None
            raise exc_info[0], exc_info[1], exc_info[2]

    if res >= 0:
        return res

    # Grab details from the last log.
    log_count, last_log = get_last_error()
    if log_count > _last_log_count:
        _last_log_count = log_count
        log = last_log
    else:
        log = None

    cdef int code = -res
    cdef char* error_buffer = <char*>malloc(lib.AV_ERROR_MAX_STRING_SIZE * sizeof(char))
    if error_buffer == NULL:
        raise MemoryError()

    try:
        if code == c_PYAV_STASHED_ERROR:
            message = PYAV_STASHED_ERROR_message
        else:
            lib.av_strerror(res, error_buffer, lib.AV_ERROR_MAX_STRING_SIZE)
            # Fall back to the OS error string if there is no message.
            message = error_buffer or os.strerror(code)

        cls = classes.get(code, UndefinedError)
        raise cls(code, message, filename, log)
    finally:
        free(error_buffer)


class UndefinedError(FFmpegError):
    """Fallback exception type in case FFmpeg returns an error we don't know about."""
    pass
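The net effect of the machinery above is that FFmpeg error codes surface as exceptions that are simultaneously `FFmpegError` subclasses and the matching Python builtins. A sketch (the path is illustrative):

    import av

    try:
        av.open("does-not-exist.mp4")  # hypothetical missing file
    except av.FileNotFoundError as exc:
        # Also an instance of builtins.FileNotFoundError via _extend_builtin().
        print(exc.errno, exc.strerror, exc.filename)
    except av.FFmpegError as exc:
        # Any other FFmpeg-reported failure lands here.
        print("ffmpeg error:", exc)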
lib/python3.10/site-packages/av/format.pyx ADDED
@@ -0,0 +1,170 @@
cimport libav as lib

from av.descriptor cimport wrap_avclass

from enum import Flag


cdef object _cinit_bypass_sentinel = object()

cdef ContainerFormat build_container_format(lib.AVInputFormat* iptr, lib.AVOutputFormat* optr):
    if not iptr and not optr:
        raise ValueError("needs input format or output format")
    cdef ContainerFormat format = ContainerFormat.__new__(ContainerFormat, _cinit_bypass_sentinel)
    format.iptr = iptr
    format.optr = optr
    format.name = optr.name if optr else iptr.name
    return format


class Flags(Flag):
    no_file = lib.AVFMT_NOFILE
    need_number: "Needs '%d' in filename." = lib.AVFMT_NEEDNUMBER
    show_ids: "Show format stream ID numbers." = lib.AVFMT_SHOW_IDS
    global_header: "Format wants global header." = lib.AVFMT_GLOBALHEADER
    no_timestamps: "Format does not need / have any timestamps." = lib.AVFMT_NOTIMESTAMPS
    generic_index: "Use generic index building code." = lib.AVFMT_GENERIC_INDEX
    ts_discont: "Format allows timestamp discontinuities." = lib.AVFMT_TS_DISCONT
    variable_fps: "Format allows variable fps." = lib.AVFMT_VARIABLE_FPS
    no_dimensions: "Format does not need width/height." = lib.AVFMT_NODIMENSIONS
    no_streams: "Format does not require any streams." = lib.AVFMT_NOSTREAMS
    no_bin_search: "Format does not allow falling back on binary search via read_timestamp." = lib.AVFMT_NOBINSEARCH
    no_gen_search: "Format does not allow falling back on generic search." = lib.AVFMT_NOGENSEARCH
    no_byte_seek: "Format does not allow seeking by bytes." = lib.AVFMT_NO_BYTE_SEEK
    allow_flush: "Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function." = lib.AVFMT_ALLOW_FLUSH
    ts_nonstrict: "Format does not require strictly increasing timestamps, but they must still be monotonic." = lib.AVFMT_TS_NONSTRICT
    ts_negative: "Format allows muxing negative timestamps." = lib.AVFMT_TS_NEGATIVE
    # If not set, the timestamps will be shifted in `av_write_frame()` and `av_interleaved_write_frame()`
    # so they start from 0. The user or muxer can override this through AVFormatContext.avoid_negative_ts.
    seek_to_pts: "Seeking is based on PTS." = lib.AVFMT_SEEK_TO_PTS


cdef class ContainerFormat:
    """Descriptor of a container format.

    :param str name: The name of the format.
    :param str mode: ``'r'`` or ``'w'`` for input and output formats; defaults
        to None, which will grab either.

    """

    def __cinit__(self, name, mode=None):
        if name is _cinit_bypass_sentinel:
            return

        # We need to hold onto the original name, because AVInputFormat.name is
        # actually comma-separated, and so we need to remember which one this was.
        self.name = name

        # Searches comma-separated names.
        if mode is None or mode == "r":
            self.iptr = lib.av_find_input_format(name)

        if mode is None or mode == "w":
            self.optr = lib.av_guess_format(name, NULL, NULL)

        if not self.iptr and not self.optr:
            raise ValueError(f"no container format {name!r}")

    def __repr__(self):
        return f"<av.{self.__class__.__name__} {self.name!r}>"

    @property
    def descriptor(self):
        if self.iptr:
            return wrap_avclass(self.iptr.priv_class)
        else:
            return wrap_avclass(self.optr.priv_class)

    @property
    def options(self):
        return self.descriptor.options

    @property
    def input(self):
        """An input-only view of this format."""
        if self.iptr == NULL:
            return None
        elif self.optr == NULL:
            return self
        else:
            return build_container_format(self.iptr, NULL)

    @property
    def output(self):
        """An output-only view of this format."""
        if self.optr == NULL:
            return None
        elif self.iptr == NULL:
            return self
        else:
            return build_container_format(NULL, self.optr)

    @property
    def is_input(self):
        return self.iptr != NULL

    @property
    def is_output(self):
        return self.optr != NULL

    @property
    def long_name(self):
        # We prefer the output names, since the inputs may represent
        # multiple formats.
        return self.optr.long_name if self.optr else self.iptr.long_name

    @property
    def extensions(self):
        cdef set exts = set()
        if self.iptr and self.iptr.extensions:
            exts.update(self.iptr.extensions.split(","))
        if self.optr and self.optr.extensions:
            exts.update(self.optr.extensions.split(","))
        return exts

    @property
    def flags(self):
        """
        Get the flags bitmask for the format.

        :rtype: int
        """
        return (
            (self.iptr.flags if self.iptr else 0) |
            (self.optr.flags if self.optr else 0)
        )

    @property
    def no_file(self):
        return bool(self.flags & lib.AVFMT_NOFILE)


cdef get_output_format_names():
    names = set()
    cdef const lib.AVOutputFormat *ptr
    cdef void *opaque = NULL
    while True:
        ptr = lib.av_muxer_iterate(&opaque)
        if ptr:
            names.add(ptr.name)
        else:
            break
    return names

cdef get_input_format_names():
    names = set()
    cdef const lib.AVInputFormat *ptr
    cdef void *opaque = NULL
    while True:
        ptr = lib.av_demuxer_iterate(&opaque)
        if ptr:
            names.add(ptr.name)
        else:
            break
    return names

formats_available = get_output_format_names()
formats_available.update(get_input_format_names())


format_descriptor = wrap_avclass(lib.avformat_get_class())
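For illustration, a `ContainerFormat` can be probed like this (the exact long names and extensions depend on the linked FFmpeg build):

    import av

    fmt = av.ContainerFormat("mp4")
    print(fmt.long_name)                # e.g. "MP4 (MPEG-4 Part 14)"
    print(fmt.extensions)               # e.g. {"mp4", "m4a", ...}
    print(fmt.is_input, fmt.is_output)  # mp4 is both a demuxer and a muxer

    # All formats compiled into the linked FFmpeg:
    print(sorted(av.formats_available)[:5])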
lib/python3.10/site-packages/av/frame.pxd ADDED
@@ -0,0 +1,14 @@
cimport libav as lib

from av.packet cimport Packet
from av.sidedata.sidedata cimport _SideDataContainer


cdef class Frame:
    cdef lib.AVFrame *ptr
    # We define our own time.
    cdef lib.AVRational _time_base
    cdef _rebase_time(self, lib.AVRational)
    cdef _SideDataContainer _side_data
    cdef _copy_internal_attributes(self, Frame source, bint data_layout=?)
    cdef _init_user_attributes(self)
lib/python3.10/site-packages/av/logging.pxd ADDED
@@ -0,0 +1,2 @@
cpdef get_last_error()
lib/python3.10/site-packages/av/logging.pyi ADDED
@@ -0,0 +1,33 @@
from typing import Any, Callable

PANIC: int
FATAL: int
ERROR: int
WARNING: int
INFO: int
VERBOSE: int
DEBUG: int
TRACE: int
CRITICAL: int

def adapt_level(level: int) -> int: ...
def get_level() -> int | None: ...
def set_level(level: int | None) -> None: ...
def set_libav_level(level: int) -> None: ...
def restore_default_callback() -> None: ...
def get_skip_repeated() -> bool: ...
def set_skip_repeated(v: bool) -> None: ...
def get_last_error() -> tuple[int, tuple[int, str, str] | None]: ...
def log(level: int, name: str, message: str) -> None: ...

class Capture:
    logs: list[tuple[int, str, str]]

    def __init__(self, local: bool = True) -> None: ...
    def __enter__(self) -> list[tuple[int, str, str]]: ...
    def __exit__(
        self,
        type_: type | None,
        value: Exception | None,
        traceback: Callable[..., Any] | None,
    ) -> None: ...
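`Capture` is a context manager that collects libav log records instead of emitting them; a usage sketch built only from the stubs above:

    import av
    from av.logging import Capture

    with Capture() as logs:
        av.logging.log(av.logging.INFO, "test", "a message routed through libav")

    # Each record is a (level, name, message) tuple.
    for level, name, message in logs:
        print(level, name, message)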
lib/python3.10/site-packages/av/opaque.pxd ADDED
@@ -0,0 +1,12 @@
cimport libav as lib


cdef class OpaqueContainer:
    cdef dict _by_name

    cdef lib.AVBufferRef *add(self, object v)
    cdef object get(self, bytes name)
    cdef object pop(self, bytes name)


cdef OpaqueContainer opaque_container
lib/python3.10/site-packages/av/option.pxd ADDED
@@ -0,0 +1,21 @@
cimport libav as lib


cdef class BaseOption:

    cdef const lib.AVOption *ptr


cdef class Option(BaseOption):

    cdef readonly tuple choices


cdef class OptionChoice(BaseOption):

    cdef readonly bint is_default


cdef Option wrap_option(tuple choices, const lib.AVOption *ptr)

cdef OptionChoice wrap_option_choice(const lib.AVOption *ptr, bint is_default)
lib/python3.10/site-packages/av/option.pyi ADDED
@@ -0,0 +1,55 @@
from enum import Enum, Flag
from typing import cast

class OptionType(Enum):
    FLAGS = cast(int, ...)
    INT = cast(int, ...)
    INT64 = cast(int, ...)
    DOUBLE = cast(int, ...)
    FLOAT = cast(int, ...)
    STRING = cast(int, ...)
    RATIONAL = cast(int, ...)
    BINARY = cast(int, ...)
    DICT = cast(int, ...)
    CONST = cast(int, ...)
    IMAGE_SIZE = cast(int, ...)
    PIXEL_FMT = cast(int, ...)
    SAMPLE_FMT = cast(int, ...)
    VIDEO_RATE = cast(int, ...)
    DURATION = cast(int, ...)
    COLOR = cast(int, ...)
    CHANNEL_LAYOUT = cast(int, ...)
    BOOL = cast(int, ...)

class OptionFlags(Flag):
    ENCODING_PARAM = cast(int, ...)
    DECODING_PARAM = cast(int, ...)
    AUDIO_PARAM = cast(int, ...)
    VIDEO_PARAM = cast(int, ...)
    SUBTITLE_PARAM = cast(int, ...)
    EXPORT = cast(int, ...)
    READONLY = cast(int, ...)
    FILTERING_PARAM = cast(int, ...)

class BaseOption:
    name: str
    help: str
    flags: int
    is_encoding_param: bool
    is_decoding_param: bool
    is_audio_param: bool
    is_video_param: bool
    is_subtitle_param: bool
    is_export: bool
    is_readonly: bool
    is_filtering_param: bool

class Option(BaseOption):
    type: OptionType
    offset: int
    default: int
    min: int
    max: int

class OptionChoice(BaseOption):
    value: int
lib/python3.10/site-packages/av/option.pyx ADDED
@@ -0,0 +1,172 @@
cimport libav as lib

from av.utils cimport flag_in_bitfield

from enum import Enum, Flag


cdef object _cinit_sentinel = object()

cdef Option wrap_option(tuple choices, const lib.AVOption *ptr):
    if ptr == NULL:
        return None
    cdef Option obj = Option(_cinit_sentinel)
    obj.ptr = ptr
    obj.choices = choices
    return obj


class OptionType(Enum):
    FLAGS = lib.AV_OPT_TYPE_FLAGS
    INT = lib.AV_OPT_TYPE_INT
    INT64 = lib.AV_OPT_TYPE_INT64
    DOUBLE = lib.AV_OPT_TYPE_DOUBLE
    FLOAT = lib.AV_OPT_TYPE_FLOAT
    STRING = lib.AV_OPT_TYPE_STRING
    RATIONAL = lib.AV_OPT_TYPE_RATIONAL
    BINARY = lib.AV_OPT_TYPE_BINARY
    DICT = lib.AV_OPT_TYPE_DICT
    UINT64 = lib.AV_OPT_TYPE_UINT64
    CONST = lib.AV_OPT_TYPE_CONST
    IMAGE_SIZE = lib.AV_OPT_TYPE_IMAGE_SIZE
    PIXEL_FMT = lib.AV_OPT_TYPE_PIXEL_FMT
    SAMPLE_FMT = lib.AV_OPT_TYPE_SAMPLE_FMT
    VIDEO_RATE = lib.AV_OPT_TYPE_VIDEO_RATE
    DURATION = lib.AV_OPT_TYPE_DURATION
    COLOR = lib.AV_OPT_TYPE_COLOR
    CHANNEL_LAYOUT = lib.AV_OPT_TYPE_CHLAYOUT
    BOOL = lib.AV_OPT_TYPE_BOOL

cdef tuple _INT_TYPES = (
    lib.AV_OPT_TYPE_FLAGS,
    lib.AV_OPT_TYPE_INT,
    lib.AV_OPT_TYPE_INT64,
    lib.AV_OPT_TYPE_PIXEL_FMT,
    lib.AV_OPT_TYPE_SAMPLE_FMT,
    lib.AV_OPT_TYPE_DURATION,
    lib.AV_OPT_TYPE_CHLAYOUT,
    lib.AV_OPT_TYPE_BOOL,
)

class OptionFlags(Flag):
    ENCODING_PARAM = lib.AV_OPT_FLAG_ENCODING_PARAM
    DECODING_PARAM = lib.AV_OPT_FLAG_DECODING_PARAM
    AUDIO_PARAM = lib.AV_OPT_FLAG_AUDIO_PARAM
    VIDEO_PARAM = lib.AV_OPT_FLAG_VIDEO_PARAM
    SUBTITLE_PARAM = lib.AV_OPT_FLAG_SUBTITLE_PARAM
    EXPORT = lib.AV_OPT_FLAG_EXPORT
    READONLY = lib.AV_OPT_FLAG_READONLY
    FILTERING_PARAM = lib.AV_OPT_FLAG_FILTERING_PARAM


cdef class BaseOption:
    def __cinit__(self, sentinel):
        if sentinel is not _cinit_sentinel:
            raise RuntimeError(f"Cannot construct av.{self.__class__.__name__}")

    @property
    def name(self):
        return self.ptr.name

    @property
    def help(self):
        return self.ptr.help if self.ptr.help != NULL else ""

    @property
    def flags(self):
        return self.ptr.flags

    # Option flags
    @property
    def is_encoding_param(self):
        return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_ENCODING_PARAM)

    @property
    def is_decoding_param(self):
        return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_DECODING_PARAM)

    @property
    def is_audio_param(self):
        return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_AUDIO_PARAM)

    @property
    def is_video_param(self):
        return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_VIDEO_PARAM)

    @property
    def is_subtitle_param(self):
        return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_SUBTITLE_PARAM)

    @property
    def is_export(self):
        return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_EXPORT)

    @property
    def is_readonly(self):
        return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_READONLY)

    @property
    def is_filtering_param(self):
        return flag_in_bitfield(self.ptr.flags, lib.AV_OPT_FLAG_FILTERING_PARAM)


cdef class Option(BaseOption):
    @property
    def type(self):
        return OptionType(self.ptr.type)

    @property
    def offset(self):
        """
        This can be used to find aliases of an option.
        Options in a particular descriptor with the same offset are aliases.
        """
        return self.ptr.offset

    @property
    def default(self):
        if self.ptr.type in _INT_TYPES:
            return self.ptr.default_val.i64
        if self.ptr.type in (lib.AV_OPT_TYPE_DOUBLE, lib.AV_OPT_TYPE_FLOAT,
                             lib.AV_OPT_TYPE_RATIONAL):
            return self.ptr.default_val.dbl
        if self.ptr.type in (lib.AV_OPT_TYPE_STRING, lib.AV_OPT_TYPE_BINARY,
                             lib.AV_OPT_TYPE_IMAGE_SIZE, lib.AV_OPT_TYPE_VIDEO_RATE,
                             lib.AV_OPT_TYPE_COLOR):
            return self.ptr.default_val.str if self.ptr.default_val.str != NULL else ""

    def _norm_range(self, value):
        if self.ptr.type in _INT_TYPES:
            return int(value)
        return value

    @property
    def min(self):
        return self._norm_range(self.ptr.min)

    @property
    def max(self):
        return self._norm_range(self.ptr.max)

    def __repr__(self):
        return (
            f"<av.{self.__class__.__name__} {self.name}"
            f" ({self.type} at *0x{self.offset:x}) at 0x{id(self):x}>"
        )


cdef OptionChoice wrap_option_choice(const lib.AVOption *ptr, bint is_default):
    if ptr == NULL:
        return None

    cdef OptionChoice obj = OptionChoice(_cinit_sentinel)
    obj.ptr = ptr
    obj.is_default = is_default
    return obj


cdef class OptionChoice(BaseOption):
    """
    Represents AV_OPT_TYPE_CONST options, which are essentially
    choices of a non-const option with the same unit.
    """

    @property
    def value(self):
        return self.ptr.default_val.i64

    def __repr__(self):
        return f"<av.{self.__class__.__name__} {self.name} at 0x{id(self):x}>"
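Options surface through `Descriptor.options`; as a sketch, inspecting a codec's private options might look like this (assumes `libx264` is compiled into the linked FFmpeg):

    import av

    codec = av.Codec("libx264", "w")
    for opt in codec.descriptor.options[:5]:
        # name/help/default come from the wrapped AVOption table.
        print(opt.name, opt.type, opt.default, opt.help)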
lib/python3.10/site-packages/av/packet.pxd ADDED
@@ -0,0 +1,21 @@
cimport libav as lib

from av.buffer cimport Buffer
from av.bytesource cimport ByteSource
from av.stream cimport Stream


cdef class Packet(Buffer):

    cdef lib.AVPacket* ptr

    cdef Stream _stream

    # We track our own time.
    cdef lib.AVRational _time_base
    cdef _rebase_time(self, lib.AVRational)

    # Hold onto the original reference.
    cdef ByteSource source

    cdef size_t _buffer_size(self)
    cdef void* _buffer_ptr(self)
lib/python3.10/site-packages/av/packet.pyi ADDED
@@ -0,0 +1,25 @@
from fractions import Fraction

from av.subtitles.subtitle import SubtitleSet

from .buffer import Buffer
from .stream import Stream

class Packet(Buffer):
    stream: Stream
    stream_index: int
    time_base: Fraction
    pts: int | None
    dts: int
    pos: int | None
    size: int
    duration: int | None
    opaque: object
    is_keyframe: bool
    is_corrupt: bool
    is_discard: bool
    is_trusted: bool
    is_disposable: bool

    def __init__(self, input: int | bytes | None = None) -> None: ...
    def decode(self) -> list[SubtitleSet]: ...
lib/python3.10/site-packages/av/plane.pyx ADDED
@@ -0,0 +1,20 @@
cdef class Plane(Buffer):
    """
    Base class for audio and video planes.

    See also :class:`~av.audio.plane.AudioPlane` and :class:`~av.video.plane.VideoPlane`.
    """

    def __cinit__(self, Frame frame, int index):
        self.frame = frame
        self.index = index

    def __repr__(self):
        return (
            f"<av.{self.__class__.__name__} {self.buffer_size} bytes; "
            f"buffer_ptr=0x{self.buffer_ptr:x}; at 0x{id(self):x}>"
        )

    cdef void* _buffer_ptr(self):
        return self.frame.ptr.extended_data[self.index]
lib/python3.10/site-packages/av/py.typed ADDED
File without changes
lib/python3.10/site-packages/av/stream.pxd ADDED
@@ -0,0 +1,26 @@
cimport libav as lib

from av.codec.context cimport CodecContext
from av.container.core cimport Container
from av.frame cimport Frame
from av.packet cimport Packet


cdef class Stream:
    cdef lib.AVStream *ptr

    # Stream attributes.
    cdef readonly Container container
    cdef readonly dict metadata

    # CodecContext attributes.
    cdef readonly CodecContext codec_context

    # Private API.
    cdef _init(self, Container, lib.AVStream*, CodecContext)
    cdef _finalize_for_output(self)
    cdef _set_time_base(self, value)
    cdef _set_id(self, value)


cdef Stream wrap_stream(Container, lib.AVStream*, CodecContext)
lib/python3.10/site-packages/av/stream.pyi ADDED
@@ -0,0 +1,48 @@
from enum import Flag
from fractions import Fraction
from typing import Literal, cast

from .codec import Codec, CodecContext
from .container import Container

class Disposition(Flag):
    default = cast(int, ...)
    dub = cast(int, ...)
    original = cast(int, ...)
    comment = cast(int, ...)
    lyrics = cast(int, ...)
    karaoke = cast(int, ...)
    forced = cast(int, ...)
    hearing_impaired = cast(int, ...)
    visual_impaired = cast(int, ...)
    clean_effects = cast(int, ...)
    attached_pic = cast(int, ...)
    timed_thumbnails = cast(int, ...)
    non_diegetic = cast(int, ...)
    captions = cast(int, ...)
    descriptions = cast(int, ...)
    metadata = cast(int, ...)
    dependent = cast(int, ...)
    still_image = cast(int, ...)
    multilayer = cast(int, ...)

class Stream:
    name: str | None
    container: Container
    codec: Codec
    codec_context: CodecContext
    metadata: dict[str, str]
    id: int
    profiles: list[str]
    profile: str | None
    index: int
    time_base: Fraction | None
    average_rate: Fraction | None
    base_rate: Fraction | None
    guessed_rate: Fraction | None
    start_time: int | None
    duration: int | None
    disposition: Disposition
    frames: int
    language: str | None
    type: Literal["video", "audio", "data", "subtitle", "attachment"]
lib/python3.10/site-packages/av/stream.pyx ADDED
@@ -0,0 +1,269 @@
1
+ cimport libav as lib
2
+
3
+ from enum import Flag
4
+
5
+ from av.error cimport err_check
6
+ from av.packet cimport Packet
7
+ from av.utils cimport (
8
+ avdict_to_dict,
9
+ avrational_to_fraction,
10
+ dict_to_avdict,
11
+ to_avrational,
12
+ )
13
+
14
+
15
+ class Disposition(Flag):
16
+ default = 1 << 0
17
+ dub = 1 << 1
18
+ original = 1 << 2
19
+ comment = 1 << 3
20
+ lyrics = 1 << 4
21
+ karaoke = 1 << 5
22
+ forced = 1 << 6
23
+ hearing_impaired = 1 << 7
24
+ visual_impaired = 1 << 8
25
+ clean_effects = 1 << 9
26
+ attached_pic = 1 << 10
27
+ timed_thumbnails = 1 << 11
28
+ non_diegetic = 1 << 12
29
+ captions = 1 << 16
30
+ descriptions = 1 << 17
31
+ metadata = 1 << 18
32
+ dependent = 1 << 19
33
+ still_image = 1 << 20
34
+ multilayer = 1 << 21
35
+
36
+
37
+ cdef object _cinit_bypass_sentinel = object()
38
+
39
+ cdef Stream wrap_stream(Container container, lib.AVStream *c_stream, CodecContext codec_context):
40
+ """Build an av.Stream for an existing AVStream.
41
+
42
+ The AVStream MUST be fully constructed and ready for use before this is
43
+ called.
44
+
45
+ """
46
+
47
+ # This better be the right one...
48
+ assert container.ptr.streams[c_stream.index] == c_stream
49
+
50
+ cdef Stream py_stream
51
+
52
+ if c_stream.codecpar.codec_type == lib.AVMEDIA_TYPE_VIDEO:
53
+ from av.video.stream import VideoStream
54
+ py_stream = VideoStream.__new__(VideoStream, _cinit_bypass_sentinel)
55
+ elif c_stream.codecpar.codec_type == lib.AVMEDIA_TYPE_AUDIO:
56
+ from av.audio.stream import AudioStream
57
+ py_stream = AudioStream.__new__(AudioStream, _cinit_bypass_sentinel)
58
+ elif c_stream.codecpar.codec_type == lib.AVMEDIA_TYPE_SUBTITLE:
59
+ from av.subtitles.stream import SubtitleStream
60
+ py_stream = SubtitleStream.__new__(SubtitleStream, _cinit_bypass_sentinel)
61
+ elif c_stream.codecpar.codec_type == lib.AVMEDIA_TYPE_ATTACHMENT:
62
+ from av.attachments.stream import AttachmentStream
63
+ py_stream = AttachmentStream.__new__(AttachmentStream, _cinit_bypass_sentinel)
64
+ elif c_stream.codecpar.codec_type == lib.AVMEDIA_TYPE_DATA:
65
+ from av.data.stream import DataStream
66
+ py_stream = DataStream.__new__(DataStream, _cinit_bypass_sentinel)
67
+ else:
68
+ py_stream = Stream.__new__(Stream, _cinit_bypass_sentinel)
69
+
70
+ py_stream._init(container, c_stream, codec_context)
71
+ return py_stream
72
+
73
+
74
+ cdef class Stream:
75
+ """
76
+ A single stream of audio, video or subtitles within a :class:`.Container`.
77
+
78
+ ::
79
+
80
+ >>> fh = av.open(video_path)
81
+ >>> stream = fh.streams.video[0]
82
+ >>> stream
83
+ <av.VideoStream #0 h264, yuv420p 1280x720 at 0x...>
84
+
85
+ This encapsulates a :class:`.CodecContext`, located at :attr:`Stream.codec_context`.
86
+ Attribute access is passed through to that context when attributes are missing
87
+ on the stream itself. E.g. ``stream.options`` will be the options on the
88
+ context.
89
+ """
90
+
91
+ def __cinit__(self, name):
92
+ if name is _cinit_bypass_sentinel:
93
+ return
94
+ raise RuntimeError("cannot manually instantiate Stream")
95
+
96
+ cdef _init(self, Container container, lib.AVStream *stream, CodecContext codec_context):
97
+ self.container = container
98
+ self.ptr = stream
99
+
100
+ self.codec_context = codec_context
101
+ if self.codec_context:
102
+ self.codec_context.stream_index = stream.index
103
+
104
+ self.metadata = avdict_to_dict(
105
+ stream.metadata,
106
+ encoding=self.container.metadata_encoding,
107
+ errors=self.container.metadata_errors,
108
+ )
109
+
110
+ def __repr__(self):
111
+ name = getattr(self, "name", None)
112
+ return (
113
+ f"<av.{self.__class__.__name__} #{self.index} {self.type or '<notype>'}/"
114
+ f"{name or '<nocodec>'} at 0x{id(self):x}>"
115
+ )
116
+
117
+ def __setattr__(self, name, value):
118
+ if name == "id":
119
+ self._set_id(value)
120
+ return
121
+ if name == "disposition":
122
+ self.ptr.disposition = value
123
+ return
124
+
125
+ # Convenience setter for codec context properties.
126
+ if self.codec_context is not None:
127
+ setattr(self.codec_context, name, value)
128
+
129
+ if name == "time_base":
130
+ self._set_time_base(value)
131
+
132
+ cdef _finalize_for_output(self):
133
+
134
+ dict_to_avdict(
135
+ &self.ptr.metadata, self.metadata,
136
+ encoding=self.container.metadata_encoding,
137
+ errors=self.container.metadata_errors,
138
+ )
139
+
140
+ if not self.ptr.time_base.num:
141
+ self.ptr.time_base = self.codec_context.ptr.time_base
142
+
143
+ # It prefers if we pass it parameters via this other object.
144
+ # Lets just copy what we want.
145
+ err_check(lib.avcodec_parameters_from_context(self.ptr.codecpar, self.codec_context.ptr))
146
+
147
+ @property
148
+ def id(self):
149
+ """
150
+ The format-specific ID of this stream.
151
+
152
+ :type: int
153
+
154
+ """
155
+ return self.ptr.id
156
+
157
+ cdef _set_id(self, value):
158
+ """
159
+ Setter used by __setattr__ for the id property.
160
+ """
161
+ if value is None:
162
+ self.ptr.id = 0
163
+ else:
164
+ self.ptr.id = value
165
+
166
+ @property
167
+ def profiles(self):
168
+ """
169
+ List the available profiles for this stream.
170
+
171
+ :type: list[str]
172
+ """
173
+ if self.codec_context:
174
+ return self.codec_context.profiles
175
+ else:
176
+ return []
177
+
178
+ @property
179
+ def profile(self):
180
+ """
181
+ The profile of this stream.
182
+
183
+ :type: str
184
+ """
185
+ if self.codec_context:
186
+ return self.codec_context.profile
187
+ else:
188
+ return None
189
+
190
+ @property
191
+ def index(self):
192
+ """
193
+ The index of this stream in its :class:`.Container`.
194
+
195
+ :type: int
196
+ """
197
+ return self.ptr.index
198
+
199
+
200
+ @property
201
+ def time_base(self):
202
+ """
203
+ The unit of time (in fractional seconds) in which timestamps are expressed.
204
+
205
+ :type: fractions.Fraction | None
206
+
207
+ """
208
+ return avrational_to_fraction(&self.ptr.time_base)
209
+
210
+ cdef _set_time_base(self, value):
211
+ """
212
+ Setter used by __setattr__ for the time_base property.
213
+ """
214
+ to_avrational(value, &self.ptr.time_base)
215
+
216
+ @property
217
+ def start_time(self):
218
+ """
219
+ The presentation timestamp in :attr:`time_base` units of the first
220
+ frame in this stream.
221
+
222
+ :type: int | None
223
+ """
224
+ if self.ptr.start_time != lib.AV_NOPTS_VALUE:
225
+ return self.ptr.start_time
226
+
227
+ @property
228
+ def duration(self):
229
+ """
230
+ The duration of this stream in :attr:`time_base` units.
231
+
232
+ :type: int | None
233
+
234
+ """
235
+ if self.ptr.duration != lib.AV_NOPTS_VALUE:
236
+ return self.ptr.duration
237
+
238
+ @property
239
+ def frames(self):
240
+ """
241
+ The number of frames this stream contains.
242
+
243
+ Returns ``0`` if it is not known.
244
+
245
+ :type: int
246
+ """
247
+ return self.ptr.nb_frames
248
+
249
+ @property
250
+ def language(self):
251
+ """
252
+ The language of the stream.
253
+
254
+ :type: str | None
255
+ """
256
+ return self.metadata.get("language")
257
+
258
+ @property
259
+ def disposition(self):
260
+ return Disposition(self.ptr.disposition)
261
+
262
+ @property
263
+ def type(self):
264
+ """
265
+ The type of the stream.
266
+
267
+ :type: Literal["audio", "video", "subtitle", "data", "attachment"]
268
+ """
269
+ return lib.av_get_media_type_string(self.ptr.codecpar.codec_type)
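
Timestamps on a Stream are plain integers in ``time_base`` units, so converting them to seconds is a Fraction multiplication. A minimal sketch, assuming a local "input.mp4":

    import av

    with av.open("input.mp4") as container:
        stream = container.streams.video[0]
        print(stream.type, stream.time_base, stream.average_rate)
        if stream.duration is not None and stream.time_base is not None:
            # duration is an int expressed in time_base units
            print("duration:", float(stream.duration * stream.time_base), "s")
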
lib/python3.10/site-packages/av/utils.pyx ADDED
@@ -0,0 +1,78 @@
+ from libc.stdint cimport uint64_t
+
+ from fractions import Fraction
+
+ cimport libav as lib
+
+ from av.error cimport err_check
+
+ # === DICTIONARIES ===
+ # ====================
+
+ cdef _decode(char *s, encoding, errors):
+     return (<bytes>s).decode(encoding, errors)
+
+ cdef bytes _encode(s, encoding, errors):
+     return s.encode(encoding, errors)
+
+ cdef dict avdict_to_dict(lib.AVDictionary *input, str encoding, str errors):
+     cdef lib.AVDictionaryEntry *element = NULL
+     cdef dict output = {}
+     while True:
+         element = lib.av_dict_get(input, "", element, lib.AV_DICT_IGNORE_SUFFIX)
+         if element == NULL:
+             break
+         output[_decode(element.key, encoding, errors)] = _decode(element.value, encoding, errors)
+     return output
+
+
+ cdef dict_to_avdict(lib.AVDictionary **dst, dict src, str encoding, str errors):
+     lib.av_dict_free(dst)
+     for key, value in src.items():
+         err_check(
+             lib.av_dict_set(
+                 dst,
+                 _encode(key, encoding, errors),
+                 _encode(value, encoding, errors),
+                 0
+             )
+         )
+
+
+ # === FRACTIONS ===
+ # =================
+
+ cdef object avrational_to_fraction(const lib.AVRational *input):
+     if input.num and input.den:
+         return Fraction(input.num, input.den)
+
+
+ cdef void to_avrational(object frac, lib.AVRational *input):
+     input.num = frac.numerator
+     input.den = frac.denominator
+
+
+ # === OTHER ===
+ # =============
+
+
+ cdef check_ndarray(object array, object dtype, int ndim):
+     """
+     Check a numpy array has the expected data type and number of dimensions.
+     """
+     if array.dtype != dtype:
+         raise ValueError(f"Expected numpy array with dtype `{dtype}` but got `{array.dtype}`")
+     if array.ndim != ndim:
+         raise ValueError(f"Expected numpy array with ndim `{ndim}` but got `{array.ndim}`")
+
+
+ cdef flag_in_bitfield(uint64_t bitfield, uint64_t flag):
+     # Not every flag exists in every version of FFMpeg, so we define them to 0.
+     if not flag:
+         return None
+     return bool(bitfield & flag)
+
+
+ # === BACKWARDS COMPAT ===
+
+ from .error import err_check
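
The AVRational <-> Fraction mapping above is what keeps PyAV timestamps exact; the same arithmetic in plain Python:

    from fractions import Fraction

    time_base = Fraction(1, 90000)   # e.g. a 90 kHz MPEG-TS clock
    pts = 270000                     # an example timestamp in time_base units
    print(pts * time_base)           # 3 -- exact, no floating-point drift
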
lib/python3.10/site-packages/multiprocess/__init__.py ADDED
@@ -0,0 +1,39 @@
+ #
+ # Package analogous to 'threading.py' but using processes
+ #
+ # multiprocessing/__init__.py
+ #
+ # This package is intended to duplicate the functionality (and much of
+ # the API) of threading.py but uses processes instead of threads.  A
+ # subpackage 'multiprocessing.dummy' has the same API but is a simple
+ # wrapper for 'threading'.
+ #
+ # Copyright (c) 2006-2008, R Oudkerk
+ # Licensed to PSF under a Contributor Agreement.
+ #
+
+ import sys
+ from . import context
+
+ __version__ = '0.70.12.2'
+
+ #
+ # Copy stuff from default context
+ #
+
+ __all__ = [x for x in dir(context._default_context) if not x.startswith('_')]
+ globals().update((name, getattr(context._default_context, name)) for name in __all__)
+
+ #
+ # XXX These should not really be documented or public.
+ #
+
+ SUBDEBUG = 5
+ SUBWARNING = 25
+
+ #
+ # Alias for main module -- will be reset by bootstrapping child processes
+ #
+
+ if '__main__' in sys.modules:
+     sys.modules['__mp_main__'] = sys.modules['__main__']
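
Because the default context's bound methods are copied into the package namespace, multiprocess is used exactly like the stdlib multiprocessing. A minimal sketch:

    import multiprocess as mp

    def square(x):
        return x * x

    if __name__ == "__main__":
        with mp.Pool(4) as pool:
            print(pool.map(square, range(10)))   # [0, 1, 4, ..., 81]
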
lib/python3.10/site-packages/multiprocess/connection.py ADDED
@@ -0,0 +1,981 @@
+ #
+ # A higher level module for using sockets (or Windows named pipes)
+ #
+ # multiprocessing/connection.py
+ #
+ # Copyright (c) 2006-2008, R Oudkerk
+ # Licensed to PSF under a Contributor Agreement.
+ #
+
+ __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ]
+
+ import io
+ import os
+ import sys
+ import socket
+ import struct
+ import time
+ import tempfile
+ import itertools
+
+ try:
+     import _multiprocess as _multiprocessing
+ except ImportError:
+     import _multiprocessing
+
+ from . import util
+
+ from . import AuthenticationError, BufferTooShort
+ from .context import reduction
+ _ForkingPickler = reduction.ForkingPickler
+
+ try:
+     import _winapi
+     from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE
+ except ImportError:
+     if sys.platform == 'win32':
+         raise
+     _winapi = None
+
+ #
+ #
+ #
+
+ BUFSIZE = 8192
+ # A very generous timeout when it comes to local connections...
+ CONNECTION_TIMEOUT = 20.
+
+ _mmap_counter = itertools.count()
+
+ default_family = 'AF_INET'
+ families = ['AF_INET']
+
+ if hasattr(socket, 'AF_UNIX'):
+     default_family = 'AF_UNIX'
+     families += ['AF_UNIX']
+
+ if sys.platform == 'win32':
+     default_family = 'AF_PIPE'
+     families += ['AF_PIPE']
+
+
+ def _init_timeout(timeout=CONNECTION_TIMEOUT):
+     return getattr(time,'monotonic',time.time)() + timeout
+
+ def _check_timeout(t):
+     return getattr(time,'monotonic',time.time)() > t
+
+ #
+ #
+ #
+
+ def arbitrary_address(family):
+     '''
+     Return an arbitrary free address for the given family
+     '''
+     if family == 'AF_INET':
+         return ('localhost', 0)
+     elif family == 'AF_UNIX':
+         # Prefer abstract sockets if possible to avoid problems with the address
+         # size. When coding portable applications, some implementations have
+         # sun_path as short as 92 bytes in the sockaddr_un struct.
+         if util.abstract_sockets_supported:
+             return f"\0listener-{os.getpid()}-{next(_mmap_counter)}"
+         return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
+     elif family == 'AF_PIPE':
+         return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
+                                (os.getpid(), next(_mmap_counter)), dir="")
+     else:
+         raise ValueError('unrecognized family')
+
+ def _validate_family(family):
+     '''
+     Checks if the family is valid for the current environment.
+     '''
+     if sys.platform != 'win32' and family == 'AF_PIPE':
+         raise ValueError('Family %s is not recognized.' % family)
+
+     if sys.platform == 'win32' and family == 'AF_UNIX':
+         # double check
+         if not hasattr(socket, family):
+             raise ValueError('Family %s is not recognized.' % family)
+
+ def address_type(address):
+     '''
+     Return the types of the address
+
+     This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
+     '''
+     if type(address) == tuple:
+         return 'AF_INET'
+     elif type(address) is str and address.startswith('\\\\'):
+         return 'AF_PIPE'
+     elif type(address) is str or util.is_abstract_socket_namespace(address):
+         return 'AF_UNIX'
+     else:
+         raise ValueError('address type of %r unrecognized' % address)
+
+ #
+ # Connection classes
+ #
+
+ class _ConnectionBase:
+     _handle = None
+
+     def __init__(self, handle, readable=True, writable=True):
+         handle = handle.__index__()
+         if handle < 0:
+             raise ValueError("invalid handle")
+         if not readable and not writable:
+             raise ValueError(
+                 "at least one of `readable` and `writable` must be True")
+         self._handle = handle
+         self._readable = readable
+         self._writable = writable
+
+     # XXX should we use util.Finalize instead of a __del__?
+
+     def __del__(self):
+         if self._handle is not None:
+             self._close()
+
+     def _check_closed(self):
+         if self._handle is None:
+             raise OSError("handle is closed")
+
+     def _check_readable(self):
+         if not self._readable:
+             raise OSError("connection is write-only")
+
+     def _check_writable(self):
+         if not self._writable:
+             raise OSError("connection is read-only")
+
+     def _bad_message_length(self):
+         if self._writable:
+             self._readable = False
+         else:
+             self.close()
+         raise OSError("bad message length")
+
+     @property
+     def closed(self):
+         """True if the connection is closed"""
+         return self._handle is None
+
+     @property
+     def readable(self):
+         """True if the connection is readable"""
+         return self._readable
+
+     @property
+     def writable(self):
+         """True if the connection is writable"""
+         return self._writable
+
+     def fileno(self):
+         """File descriptor or handle of the connection"""
+         self._check_closed()
+         return self._handle
+
+     def close(self):
+         """Close the connection"""
+         if self._handle is not None:
+             try:
+                 self._close()
+             finally:
+                 self._handle = None
+
+     def send_bytes(self, buf, offset=0, size=None):
+         """Send the bytes data from a bytes-like object"""
+         self._check_closed()
+         self._check_writable()
+         m = memoryview(buf)
+         # HACK for byte-indexing of non-bytewise buffers (e.g. array.array)
+         if m.itemsize > 1:
+             m = memoryview(bytes(m))
+         n = len(m)
+         if offset < 0:
+             raise ValueError("offset is negative")
+         if n < offset:
+             raise ValueError("buffer length < offset")
+         if size is None:
+             size = n - offset
+         elif size < 0:
+             raise ValueError("size is negative")
+         elif offset + size > n:
+             raise ValueError("buffer length < offset + size")
+         self._send_bytes(m[offset:offset + size])
+
+     def send(self, obj):
+         """Send a (picklable) object"""
+         self._check_closed()
+         self._check_writable()
+         self._send_bytes(_ForkingPickler.dumps(obj))
+
+     def recv_bytes(self, maxlength=None):
+         """
+         Receive bytes data as a bytes object.
+         """
+         self._check_closed()
+         self._check_readable()
+         if maxlength is not None and maxlength < 0:
+             raise ValueError("negative maxlength")
+         buf = self._recv_bytes(maxlength)
+         if buf is None:
+             self._bad_message_length()
+         return buf.getvalue()
+
+     def recv_bytes_into(self, buf, offset=0):
+         """
+         Receive bytes data into a writeable bytes-like object.
+         Return the number of bytes read.
+         """
+         self._check_closed()
+         self._check_readable()
+         with memoryview(buf) as m:
+             # Get bytesize of arbitrary buffer
+             itemsize = m.itemsize
+             bytesize = itemsize * len(m)
+             if offset < 0:
+                 raise ValueError("negative offset")
+             elif offset > bytesize:
+                 raise ValueError("offset too large")
+             result = self._recv_bytes()
+             size = result.tell()
+             if bytesize < offset + size:
+                 raise BufferTooShort(result.getvalue())
+             # Message can fit in dest
+             result.seek(0)
+             result.readinto(m[offset // itemsize :
+                               (offset + size) // itemsize])
+             return size
+
+     def recv(self):
+         """Receive a (picklable) object"""
+         self._check_closed()
+         self._check_readable()
+         buf = self._recv_bytes()
+         return _ForkingPickler.loads(buf.getbuffer())
+
+     def poll(self, timeout=0.0):
+         """Whether there is any input available to be read"""
+         self._check_closed()
+         self._check_readable()
+         return self._poll(timeout)
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_value, exc_tb):
+         self.close()
+
+
+ if _winapi:
+
+     class PipeConnection(_ConnectionBase):
+         """
+         Connection class based on a Windows named pipe.
+         Overlapped I/O is used, so the handles must have been created
+         with FILE_FLAG_OVERLAPPED.
+         """
+         _got_empty_message = False
+
+         def _close(self, _CloseHandle=_winapi.CloseHandle):
+             _CloseHandle(self._handle)
+
+         def _send_bytes(self, buf):
+             ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
+             try:
+                 if err == _winapi.ERROR_IO_PENDING:
+                     waitres = _winapi.WaitForMultipleObjects(
+                         [ov.event], False, INFINITE)
+                     assert waitres == WAIT_OBJECT_0
+             except:
+                 ov.cancel()
+                 raise
+             finally:
+                 nwritten, err = ov.GetOverlappedResult(True)
+             assert err == 0
+             assert nwritten == len(buf)
+
+         def _recv_bytes(self, maxsize=None):
+             if self._got_empty_message:
+                 self._got_empty_message = False
+                 return io.BytesIO()
+             else:
+                 bsize = 128 if maxsize is None else min(maxsize, 128)
+                 try:
+                     ov, err = _winapi.ReadFile(self._handle, bsize,
+                                                overlapped=True)
+                     try:
+                         if err == _winapi.ERROR_IO_PENDING:
+                             waitres = _winapi.WaitForMultipleObjects(
+                                 [ov.event], False, INFINITE)
+                             assert waitres == WAIT_OBJECT_0
+                     except:
+                         ov.cancel()
+                         raise
+                     finally:
+                         nread, err = ov.GetOverlappedResult(True)
+                         if err == 0:
+                             f = io.BytesIO()
+                             f.write(ov.getbuffer())
+                             return f
+                         elif err == _winapi.ERROR_MORE_DATA:
+                             return self._get_more_data(ov, maxsize)
+                 except OSError as e:
+                     if e.winerror == _winapi.ERROR_BROKEN_PIPE:
+                         raise EOFError
+                     else:
+                         raise
+             raise RuntimeError("shouldn't get here; expected KeyboardInterrupt")
+
+         def _poll(self, timeout):
+             if (self._got_empty_message or
+                     _winapi.PeekNamedPipe(self._handle)[0] != 0):
+                 return True
+             return bool(wait([self], timeout))
+
+         def _get_more_data(self, ov, maxsize):
+             buf = ov.getbuffer()
+             f = io.BytesIO()
+             f.write(buf)
+             left = _winapi.PeekNamedPipe(self._handle)[1]
+             assert left > 0
+             if maxsize is not None and len(buf) + left > maxsize:
+                 self._bad_message_length()
+             ov, err = _winapi.ReadFile(self._handle, left, overlapped=True)
+             rbytes, err = ov.GetOverlappedResult(True)
+             assert err == 0
+             assert rbytes == left
+             f.write(ov.getbuffer())
+             return f
+
+
+ class Connection(_ConnectionBase):
+     """
+     Connection class based on an arbitrary file descriptor (Unix only), or
+     a socket handle (Windows).
+     """
+
+     if _winapi:
+         def _close(self, _close=_multiprocessing.closesocket):
+             _close(self._handle)
+         _write = _multiprocessing.send
+         _read = _multiprocessing.recv
+     else:
+         def _close(self, _close=os.close):
+             _close(self._handle)
+         _write = os.write
+         _read = os.read
+
+     def _send(self, buf, write=_write):
+         remaining = len(buf)
+         while True:
+             n = write(self._handle, buf)
+             remaining -= n
+             if remaining == 0:
+                 break
+             buf = buf[n:]
+
+     def _recv(self, size, read=_read):
+         buf = io.BytesIO()
+         handle = self._handle
+         remaining = size
+         while remaining > 0:
+             chunk = read(handle, remaining)
+             n = len(chunk)
+             if n == 0:
+                 if remaining == size:
+                     raise EOFError
+                 else:
+                     raise OSError("got end of file during message")
+             buf.write(chunk)
+             remaining -= n
+         return buf
+
+     def _send_bytes(self, buf):
+         n = len(buf)
+         if n > 0x7fffffff:
+             pre_header = struct.pack("!i", -1)
+             header = struct.pack("!Q", n)
+             self._send(pre_header)
+             self._send(header)
+             self._send(buf)
+         else:
+             # For wire compatibility with 3.7 and lower
+             header = struct.pack("!i", n)
+             if n > 16384:
+                 # The payload is large so Nagle's algorithm won't be triggered
+                 # and we'd better avoid the cost of concatenation.
+                 self._send(header)
+                 self._send(buf)
+             else:
+                 # Issue #20540: concatenate before sending, to avoid delays due
+                 # to Nagle's algorithm on a TCP socket.
+                 # Also note we want to avoid sending a 0-length buffer separately,
+                 # to avoid "broken pipe" errors if the other end closed the pipe.
+                 self._send(header + buf)
+
+     def _recv_bytes(self, maxsize=None):
+         buf = self._recv(4)
+         size, = struct.unpack("!i", buf.getvalue())
+         if size == -1:
+             buf = self._recv(8)
+             size, = struct.unpack("!Q", buf.getvalue())
+         if maxsize is not None and size > maxsize:
+             return None
+         return self._recv(size)
+
+     def _poll(self, timeout):
+         r = wait([self], timeout)
+         return bool(r)
+
+
+ #
+ # Public functions
+ #
+
+ class Listener(object):
+     '''
+     Returns a listener object.
+
+     This is a wrapper for a bound socket which is 'listening' for
+     connections, or for a Windows named pipe.
+     '''
+     def __init__(self, address=None, family=None, backlog=1, authkey=None):
+         family = family or (address and address_type(address)) \
+                  or default_family
+         address = address or arbitrary_address(family)
+
+         _validate_family(family)
+         if family == 'AF_PIPE':
+             self._listener = PipeListener(address, backlog)
+         else:
+             self._listener = SocketListener(address, family, backlog)
+
+         if authkey is not None and not isinstance(authkey, bytes):
+             raise TypeError('authkey should be a byte string')
+
+         self._authkey = authkey
+
+     def accept(self):
+         '''
+         Accept a connection on the bound socket or named pipe of `self`.
+
+         Returns a `Connection` object.
+         '''
+         if self._listener is None:
+             raise OSError('listener is closed')
+         c = self._listener.accept()
+         if self._authkey:
+             deliver_challenge(c, self._authkey)
+             answer_challenge(c, self._authkey)
+         return c
+
+     def close(self):
+         '''
+         Close the bound socket or named pipe of `self`.
+         '''
+         listener = self._listener
+         if listener is not None:
+             self._listener = None
+             listener.close()
+
+     @property
+     def address(self):
+         return self._listener._address
+
+     @property
+     def last_accepted(self):
+         return self._listener._last_accepted
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_value, exc_tb):
+         self.close()
+
+
+ def Client(address, family=None, authkey=None):
+     '''
+     Returns a connection to the address of a `Listener`
+     '''
+     family = family or address_type(address)
+     _validate_family(family)
+     if family == 'AF_PIPE':
+         c = PipeClient(address)
+     else:
+         c = SocketClient(address)
+
+     if authkey is not None and not isinstance(authkey, bytes):
+         raise TypeError('authkey should be a byte string')
+
+     if authkey is not None:
+         answer_challenge(c, authkey)
+         deliver_challenge(c, authkey)
+
+     return c
+
+
+ if sys.platform != 'win32':
+
+     def Pipe(duplex=True):
+         '''
+         Returns pair of connection objects at either end of a pipe
+         '''
+         if duplex:
+             s1, s2 = socket.socketpair()
+             s1.setblocking(True)
+             s2.setblocking(True)
+             c1 = Connection(s1.detach())
+             c2 = Connection(s2.detach())
+         else:
+             fd1, fd2 = os.pipe()
+             c1 = Connection(fd1, writable=False)
+             c2 = Connection(fd2, readable=False)
+
+         return c1, c2
+
+ else:
+
+     def Pipe(duplex=True):
+         '''
+         Returns pair of connection objects at either end of a pipe
+         '''
+         address = arbitrary_address('AF_PIPE')
+         if duplex:
+             openmode = _winapi.PIPE_ACCESS_DUPLEX
+             access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
+             obsize, ibsize = BUFSIZE, BUFSIZE
+         else:
+             openmode = _winapi.PIPE_ACCESS_INBOUND
+             access = _winapi.GENERIC_WRITE
+             obsize, ibsize = 0, BUFSIZE
+
+         h1 = _winapi.CreateNamedPipe(
+             address, openmode | _winapi.FILE_FLAG_OVERLAPPED |
+             _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
+             _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
+             _winapi.PIPE_WAIT,
+             1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER,
+             # default security descriptor: the handle cannot be inherited
+             _winapi.NULL
+             )
+         h2 = _winapi.CreateFile(
+             address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
+             _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
+             )
+         _winapi.SetNamedPipeHandleState(
+             h2, _winapi.PIPE_READMODE_MESSAGE, None, None
+             )
+
+         overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)
+         _, err = overlapped.GetOverlappedResult(True)
+         assert err == 0
+
+         c1 = PipeConnection(h1, writable=duplex)
+         c2 = PipeConnection(h2, readable=duplex)
+
+         return c1, c2
+
+ #
+ # Definitions for connections based on sockets
+ #
+
+ class SocketListener(object):
+     '''
+     Representation of a socket which is bound to an address and listening
+     '''
+     def __init__(self, address, family, backlog=1):
+         self._socket = socket.socket(getattr(socket, family))
+         try:
+             # SO_REUSEADDR has different semantics on Windows (issue #2550).
+             if os.name == 'posix':
+                 self._socket.setsockopt(socket.SOL_SOCKET,
+                                         socket.SO_REUSEADDR, 1)
+             self._socket.setblocking(True)
+             self._socket.bind(address)
+             self._socket.listen(backlog)
+             self._address = self._socket.getsockname()
+         except OSError:
+             self._socket.close()
+             raise
+         self._family = family
+         self._last_accepted = None
+
+         if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address):
+             # Linux abstract socket namespaces do not need to be explicitly unlinked
+             self._unlink = util.Finalize(
+                 self, os.unlink, args=(address,), exitpriority=0
+             )
+         else:
+             self._unlink = None
+
+     def accept(self):
+         s, self._last_accepted = self._socket.accept()
+         s.setblocking(True)
+         return Connection(s.detach())
+
+     def close(self):
+         try:
+             self._socket.close()
+         finally:
+             unlink = self._unlink
+             if unlink is not None:
+                 self._unlink = None
+                 unlink()
+
+
+ def SocketClient(address):
+     '''
+     Return a connection object connected to the socket given by `address`
+     '''
+     family = address_type(address)
+     with socket.socket( getattr(socket, family) ) as s:
+         s.setblocking(True)
+         s.connect(address)
+         return Connection(s.detach())
+
+ #
+ # Definitions for connections based on named pipes
+ #
+
+ if sys.platform == 'win32':
+
+     class PipeListener(object):
+         '''
+         Representation of a named pipe
+         '''
+         def __init__(self, address, backlog=None):
+             self._address = address
+             self._handle_queue = [self._new_handle(first=True)]
+
+             self._last_accepted = None
+             util.sub_debug('listener created with address=%r', self._address)
+             self.close = util.Finalize(
+                 self, PipeListener._finalize_pipe_listener,
+                 args=(self._handle_queue, self._address), exitpriority=0
+                 )
+
+         def _new_handle(self, first=False):
+             flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
+             if first:
+                 flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
+             return _winapi.CreateNamedPipe(
+                 self._address, flags,
+                 _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
+                 _winapi.PIPE_WAIT,
+                 _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
+                 _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
+                 )
+
+         def accept(self):
+             self._handle_queue.append(self._new_handle())
+             handle = self._handle_queue.pop(0)
+             try:
+                 ov = _winapi.ConnectNamedPipe(handle, overlapped=True)
+             except OSError as e:
+                 if e.winerror != _winapi.ERROR_NO_DATA:
+                     raise
+                 # ERROR_NO_DATA can occur if a client has already connected,
+                 # written data and then disconnected -- see Issue 14725.
+             else:
+                 try:
+                     res = _winapi.WaitForMultipleObjects(
+                         [ov.event], False, INFINITE)
+                 except:
+                     ov.cancel()
+                     _winapi.CloseHandle(handle)
+                     raise
+                 finally:
+                     _, err = ov.GetOverlappedResult(True)
+                     assert err == 0
+             return PipeConnection(handle)
+
+         @staticmethod
+         def _finalize_pipe_listener(queue, address):
+             util.sub_debug('closing listener with address=%r', address)
+             for handle in queue:
+                 _winapi.CloseHandle(handle)
+
+     def PipeClient(address):
+         '''
+         Return a connection object connected to the pipe given by `address`
+         '''
+         t = _init_timeout()
+         while 1:
+             try:
+                 _winapi.WaitNamedPipe(address, 1000)
+                 h = _winapi.CreateFile(
+                     address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
+                     0, _winapi.NULL, _winapi.OPEN_EXISTING,
+                     _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
+                     )
+             except OSError as e:
+                 if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT,
+                                       _winapi.ERROR_PIPE_BUSY) or _check_timeout(t):
+                     raise
+             else:
+                 break
+         else:
+             raise
+
+         _winapi.SetNamedPipeHandleState(
+             h, _winapi.PIPE_READMODE_MESSAGE, None, None
+             )
+         return PipeConnection(h)
+
+ #
+ # Authentication stuff
+ #
+
+ MESSAGE_LENGTH = 20
+
+ CHALLENGE = b'#CHALLENGE#'
+ WELCOME = b'#WELCOME#'
+ FAILURE = b'#FAILURE#'
+
+ def deliver_challenge(connection, authkey):
+     import hmac
+     if not isinstance(authkey, bytes):
+         raise ValueError(
+             "Authkey must be bytes, not {0!s}".format(type(authkey)))
+     message = os.urandom(MESSAGE_LENGTH)
+     connection.send_bytes(CHALLENGE + message)
+     digest = hmac.new(authkey, message, 'md5').digest()
+     response = connection.recv_bytes(256)        # reject large message
+     if response == digest:
+         connection.send_bytes(WELCOME)
+     else:
+         connection.send_bytes(FAILURE)
+         raise AuthenticationError('digest received was wrong')
+
+ def answer_challenge(connection, authkey):
+     import hmac
+     if not isinstance(authkey, bytes):
+         raise ValueError(
+             "Authkey must be bytes, not {0!s}".format(type(authkey)))
+     message = connection.recv_bytes(256)         # reject large message
+     assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
+     message = message[len(CHALLENGE):]
+     digest = hmac.new(authkey, message, 'md5').digest()
+     connection.send_bytes(digest)
+     response = connection.recv_bytes(256)        # reject large message
+     if response != WELCOME:
+         raise AuthenticationError('digest sent was rejected')
+
+ #
+ # Support for using xmlrpclib for serialization
+ #
+
+ class ConnectionWrapper(object):
+     def __init__(self, conn, dumps, loads):
+         self._conn = conn
+         self._dumps = dumps
+         self._loads = loads
+         for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
+             obj = getattr(conn, attr)
+             setattr(self, attr, obj)
+     def send(self, obj):
+         s = self._dumps(obj)
+         self._conn.send_bytes(s)
+     def recv(self):
+         s = self._conn.recv_bytes()
+         return self._loads(s)
+
+ def _xml_dumps(obj):
+     return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')
+
+ def _xml_loads(s):
+     (obj,), method = xmlrpclib.loads(s.decode('utf-8'))
+     return obj
+
+ class XmlListener(Listener):
+     def accept(self):
+         global xmlrpclib
+         import xmlrpc.client as xmlrpclib
+         obj = Listener.accept(self)
+         return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
+
+ def XmlClient(*args, **kwds):
+     global xmlrpclib
+     import xmlrpc.client as xmlrpclib
+     return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
+
+ #
+ # Wait
+ #
+
+ if sys.platform == 'win32':
+
+     def _exhaustive_wait(handles, timeout):
+         # Return ALL handles which are currently signalled.  (Only
+         # returning the first signalled might create starvation issues.)
+         L = list(handles)
+         ready = []
+         while L:
+             res = _winapi.WaitForMultipleObjects(L, False, timeout)
+             if res == WAIT_TIMEOUT:
+                 break
+             elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
+                 res -= WAIT_OBJECT_0
+             elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
+                 res -= WAIT_ABANDONED_0
+             else:
+                 raise RuntimeError('Should not get here')
+             ready.append(L[res])
+             L = L[res+1:]
+             timeout = 0
+         return ready
+
+     _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}
+
+     def wait(object_list, timeout=None):
+         '''
+         Wait till an object in object_list is ready/readable.
+
+         Returns list of those objects in object_list which are ready/readable.
+         '''
+         if timeout is None:
+             timeout = INFINITE
+         elif timeout < 0:
+             timeout = 0
+         else:
+             timeout = int(timeout * 1000 + 0.5)
+
+         object_list = list(object_list)
+         waithandle_to_obj = {}
+         ov_list = []
+         ready_objects = set()
+         ready_handles = set()
+
+         try:
+             for o in object_list:
+                 try:
+                     fileno = getattr(o, 'fileno')
+                 except AttributeError:
+                     waithandle_to_obj[o.__index__()] = o
+                 else:
+                     # start an overlapped read of length zero
+                     try:
+                         ov, err = _winapi.ReadFile(fileno(), 0, True)
+                     except OSError as e:
+                         ov, err = None, e.winerror
+                         if err not in _ready_errors:
+                             raise
+                     if err == _winapi.ERROR_IO_PENDING:
+                         ov_list.append(ov)
+                         waithandle_to_obj[ov.event] = o
+                     else:
+                         # If o.fileno() is an overlapped pipe handle and
+                         # err == 0 then there is a zero length message
+                         # in the pipe, but it HAS NOT been consumed...
+                         if ov and sys.getwindowsversion()[:2] >= (6, 2):
+                             # ... except on Windows 8 and later, where
+                             # the message HAS been consumed.
+                             try:
+                                 _, err = ov.GetOverlappedResult(False)
+                             except OSError as e:
+                                 err = e.winerror
+                             if not err and hasattr(o, '_got_empty_message'):
+                                 o._got_empty_message = True
+                         ready_objects.add(o)
+                         timeout = 0
+
+             ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
+         finally:
+             # request that overlapped reads stop
+             for ov in ov_list:
+                 ov.cancel()
+
+             # wait for all overlapped reads to stop
+             for ov in ov_list:
+                 try:
+                     _, err = ov.GetOverlappedResult(True)
+                 except OSError as e:
+                     err = e.winerror
+                     if err not in _ready_errors:
+                         raise
+                 if err != _winapi.ERROR_OPERATION_ABORTED:
+                     o = waithandle_to_obj[ov.event]
+                     ready_objects.add(o)
+                     if err == 0:
+                         # If o.fileno() is an overlapped pipe handle then
+                         # a zero length message HAS been consumed.
+                         if hasattr(o, '_got_empty_message'):
+                             o._got_empty_message = True
+
+         ready_objects.update(waithandle_to_obj[h] for h in ready_handles)
+         return [o for o in object_list if o in ready_objects]
+
+ else:
+
+     import selectors
+
+     # poll/select have the advantage of not requiring any extra file
+     # descriptor, contrarily to epoll/kqueue (also, they require a single
+     # syscall).
+     if hasattr(selectors, 'PollSelector'):
+         _WaitSelector = selectors.PollSelector
+     else:
+         _WaitSelector = selectors.SelectSelector
+
+     def wait(object_list, timeout=None):
+         '''
+         Wait till an object in object_list is ready/readable.
+
+         Returns list of those objects in object_list which are ready/readable.
+         '''
+         with _WaitSelector() as selector:
+             for obj in object_list:
+                 selector.register(obj, selectors.EVENT_READ)
+
+             if timeout is not None:
+                 deadline = getattr(time,'monotonic',time.time)() + timeout
+
+             while True:
+                 ready = selector.select(timeout)
+                 if ready:
+                     return [key.fileobj for (key, events) in ready]
+                 else:
+                     if timeout is not None:
+                         timeout = deadline - getattr(time,'monotonic',time.time)()
+                         if timeout < 0:
+                             return ready
+
+ #
+ # Make connection and socket objects sharable if possible
+ #
+
+ if sys.platform == 'win32':
+     def reduce_connection(conn):
+         handle = conn.fileno()
+         with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
+             from . import resource_sharer
+             ds = resource_sharer.DupSocket(s)
+             return rebuild_connection, (ds, conn.readable, conn.writable)
+     def rebuild_connection(ds, readable, writable):
+         sock = ds.detach()
+         return Connection(sock.detach(), readable, writable)
+     reduction.register(Connection, reduce_connection)
+
+     def reduce_pipe_connection(conn):
+         access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
+                   (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
+         dh = reduction.DupHandle(conn.fileno(), access)
+         return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
+     def rebuild_pipe_connection(dh, readable, writable):
+         handle = dh.detach()
+         return PipeConnection(handle, readable, writable)
+     reduction.register(PipeConnection, reduce_pipe_connection)
+
+ else:
+     def reduce_connection(conn):
+         df = reduction.DupFd(conn.fileno())
+         return rebuild_connection, (df, conn.readable, conn.writable)
+     def rebuild_connection(df, readable, writable):
+         fd = df.detach()
+         return Connection(fd, readable, writable)
+     reduction.register(Connection, reduce_connection)
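
End to end, Pipe() hands back two Connection objects whose send()/recv() pickle objects and preserve message boundaries via the length header described above. A minimal sketch:

    from multiprocess import Pipe, Process

    def child(conn):
        conn.send({"hello": "world"})   # pickled, then length-prefixed on the wire
        conn.close()

    if __name__ == "__main__":
        parent_conn, child_conn = Pipe()
        p = Process(target=child, args=(child_conn,))
        p.start()
        print(parent_conn.recv())       # {'hello': 'world'}
        p.join()
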
lib/python3.10/site-packages/multiprocess/context.py ADDED
@@ -0,0 +1,362 @@
+ import os
+ import sys
+ import threading
+
+ from . import process
+ from . import reduction
+
+ __all__ = ()
+
+ #
+ # Exceptions
+ #
+
+ class ProcessError(Exception):
+     pass
+
+ class BufferTooShort(ProcessError):
+     pass
+
+ class TimeoutError(ProcessError):
+     pass
+
+ class AuthenticationError(ProcessError):
+     pass
+
+ #
+ # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py
+ #
+
+ class BaseContext(object):
+
+     ProcessError = ProcessError
+     BufferTooShort = BufferTooShort
+     TimeoutError = TimeoutError
+     AuthenticationError = AuthenticationError
+
+     current_process = staticmethod(process.current_process)
+     parent_process = staticmethod(process.parent_process)
+     active_children = staticmethod(process.active_children)
+
+     def cpu_count(self):
+         '''Returns the number of CPUs in the system'''
+         num = os.cpu_count()
+         if num is None:
+             raise NotImplementedError('cannot determine number of cpus')
+         else:
+             return num
+
+     def Manager(self):
+         '''Returns a manager associated with a running server process
+
+         The managers methods such as `Lock()`, `Condition()` and `Queue()`
+         can be used to create shared objects.
+         '''
+         from .managers import SyncManager
+         m = SyncManager(ctx=self.get_context())
+         m.start()
+         return m
+
+     def Pipe(self, duplex=True):
+         '''Returns two connection object connected by a pipe'''
+         from .connection import Pipe
+         return Pipe(duplex)
+
+     def Lock(self):
+         '''Returns a non-recursive lock object'''
+         from .synchronize import Lock
+         return Lock(ctx=self.get_context())
+
+     def RLock(self):
+         '''Returns a recursive lock object'''
+         from .synchronize import RLock
+         return RLock(ctx=self.get_context())
+
+     def Condition(self, lock=None):
+         '''Returns a condition object'''
+         from .synchronize import Condition
+         return Condition(lock, ctx=self.get_context())
+
+     def Semaphore(self, value=1):
+         '''Returns a semaphore object'''
+         from .synchronize import Semaphore
+         return Semaphore(value, ctx=self.get_context())
+
+     def BoundedSemaphore(self, value=1):
+         '''Returns a bounded semaphore object'''
+         from .synchronize import BoundedSemaphore
+         return BoundedSemaphore(value, ctx=self.get_context())
+
+     def Event(self):
+         '''Returns an event object'''
+         from .synchronize import Event
+         return Event(ctx=self.get_context())
+
+     def Barrier(self, parties, action=None, timeout=None):
+         '''Returns a barrier object'''
+         from .synchronize import Barrier
+         return Barrier(parties, action, timeout, ctx=self.get_context())
+
+     def Queue(self, maxsize=0):
+         '''Returns a queue object'''
+         from .queues import Queue
+         return Queue(maxsize, ctx=self.get_context())
+
+     def JoinableQueue(self, maxsize=0):
+         '''Returns a queue object'''
+         from .queues import JoinableQueue
+         return JoinableQueue(maxsize, ctx=self.get_context())
+
+     def SimpleQueue(self):
+         '''Returns a queue object'''
+         from .queues import SimpleQueue
+         return SimpleQueue(ctx=self.get_context())
+
+     def Pool(self, processes=None, initializer=None, initargs=(),
+              maxtasksperchild=None):
+         '''Returns a process pool object'''
+         from .pool import Pool
+         return Pool(processes, initializer, initargs, maxtasksperchild,
+                     context=self.get_context())
+
+     def RawValue(self, typecode_or_type, *args):
+         '''Returns a shared object'''
+         from .sharedctypes import RawValue
+         return RawValue(typecode_or_type, *args)
+
+     def RawArray(self, typecode_or_type, size_or_initializer):
+         '''Returns a shared array'''
+         from .sharedctypes import RawArray
+         return RawArray(typecode_or_type, size_or_initializer)
+
+     def Value(self, typecode_or_type, *args, lock=True):
+         '''Returns a synchronized shared object'''
+         from .sharedctypes import Value
+         return Value(typecode_or_type, *args, lock=lock,
+                      ctx=self.get_context())
+
+     def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
+         '''Returns a synchronized shared array'''
+         from .sharedctypes import Array
+         return Array(typecode_or_type, size_or_initializer, lock=lock,
+                      ctx=self.get_context())
+
+     def freeze_support(self):
+         '''Check whether this is a fake forked process in a frozen executable.
+         If so then run code specified by commandline and exit.
+         '''
+         if sys.platform == 'win32' and getattr(sys, 'frozen', False):
+             from .spawn import freeze_support
+             freeze_support()
+
+     def get_logger(self):
+         '''Return package logger -- if it does not already exist then
+         it is created.
+         '''
+         from .util import get_logger
+         return get_logger()
+
+     def log_to_stderr(self, level=None):
+         '''Turn on logging and add a handler which prints to stderr'''
+         from .util import log_to_stderr
+         return log_to_stderr(level)
+
+     def allow_connection_pickling(self):
+         '''Install support for sending connections and sockets
+         between processes
+         '''
+         # This is undocumented.  In previous versions of multiprocessing
+         # its only effect was to make socket objects inheritable on Windows.
+         from . import connection
+
+     def set_executable(self, executable):
+         '''Sets the path to a python.exe or pythonw.exe binary used to run
+         child processes instead of sys.executable when using the 'spawn'
+         start method.  Useful for people embedding Python.
+         '''
+         from .spawn import set_executable
+         set_executable(executable)
+
+     def set_forkserver_preload(self, module_names):
+         '''Set list of module names to try to load in forkserver process.
+         This is really just a hint.
+         '''
+         from .forkserver import set_forkserver_preload
+         set_forkserver_preload(module_names)
+
+     def get_context(self, method=None):
+         if method is None:
+             return self
+         try:
+             ctx = _concrete_contexts[method]
+         except KeyError:
+             raise ValueError('cannot find context for %r' % method) from None
+         ctx._check_available()
+         return ctx
+
+     def get_start_method(self, allow_none=False):
+         return self._name
+
+     def set_start_method(self, method, force=False):
+         raise ValueError('cannot set start method of concrete context')
+
+     @property
+     def reducer(self):
+         '''Controls how objects will be reduced to a form that can be
+         shared with other processes.'''
+         return globals().get('reduction')
+
+     @reducer.setter
+     def reducer(self, reduction):
+         globals()['reduction'] = reduction
+
+     def _check_available(self):
+         pass
+
+ #
+ # Type of default context -- underlying context can be set at most once
+ #
+
+ class Process(process.BaseProcess):
+     _start_method = None
+     @staticmethod
+     def _Popen(process_obj):
+         return _default_context.get_context().Process._Popen(process_obj)
+
+ class DefaultContext(BaseContext):
+     Process = Process
+
+     def __init__(self, context):
+         self._default_context = context
+         self._actual_context = None
+
+     def get_context(self, method=None):
+         if method is None:
+             if self._actual_context is None:
+                 self._actual_context = self._default_context
+             return self._actual_context
+         else:
+             return super().get_context(method)
+
+     def set_start_method(self, method, force=False):
+         if self._actual_context is not None and not force:
+             raise RuntimeError('context has already been set')
+         if method is None and force:
+             self._actual_context = None
+             return
+         self._actual_context = self.get_context(method)
+
+     def get_start_method(self, allow_none=False):
+         if self._actual_context is None:
+             if allow_none:
+                 return None
+             self._actual_context = self._default_context
+         return self._actual_context._name
+
+     def get_all_start_methods(self):
+         if sys.platform == 'win32':
+             return ['spawn']
+         else:
+             methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn']
+             if reduction.HAVE_SEND_HANDLE:
+                 methods.append('forkserver')
+             return methods
+
+
+ #
+ # Context types for fixed start method
+ #
+
+ if sys.platform != 'win32':
+
+     class ForkProcess(process.BaseProcess):
+         _start_method = 'fork'
+         @staticmethod
+         def _Popen(process_obj):
+             from .popen_fork import Popen
+             return Popen(process_obj)
+
+     class SpawnProcess(process.BaseProcess):
+         _start_method = 'spawn'
+         @staticmethod
+         def _Popen(process_obj):
+             from .popen_spawn_posix import Popen
+             return Popen(process_obj)
+
+     class ForkServerProcess(process.BaseProcess):
+         _start_method = 'forkserver'
+         @staticmethod
+         def _Popen(process_obj):
+             from .popen_forkserver import Popen
+             return Popen(process_obj)
+
+     class ForkContext(BaseContext):
+         _name = 'fork'
+         Process = ForkProcess
+
+     class SpawnContext(BaseContext):
+         _name = 'spawn'
+         Process = SpawnProcess
+
+     class ForkServerContext(BaseContext):
+         _name = 'forkserver'
+         Process = ForkServerProcess
+         def _check_available(self):
+             if not reduction.HAVE_SEND_HANDLE:
+                 raise ValueError('forkserver start method not available')
+
+     _concrete_contexts = {
+         'fork': ForkContext(),
+         'spawn': SpawnContext(),
+         'forkserver': ForkServerContext(),
+     }
+     if sys.platform == 'darwin':
+         # bpo-33725: running arbitrary code after fork() is no longer reliable
+         # on macOS since macOS 10.14 (Mojave). Use spawn by default instead.
+         _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn
+     else:
+         _default_context = DefaultContext(_concrete_contexts['fork'])
+
+ else:
+
+     class SpawnProcess(process.BaseProcess):
+         _start_method = 'spawn'
+         @staticmethod
+         def _Popen(process_obj):
+             from .popen_spawn_win32 import Popen
+             return Popen(process_obj)
+
+     class SpawnContext(BaseContext):
+         _name = 'spawn'
+         Process = SpawnProcess
+
+     _concrete_contexts = {
+         'spawn': SpawnContext(),
+     }
+     _default_context = DefaultContext(_concrete_contexts['spawn'])
+
+ #
+ # Force the start method
+ #
+
+ def _force_start_method(method):
+     _default_context._actual_context = _concrete_contexts[method]
+
+ #
+ # Check that the current thread is spawning a child process
+ #
+
+ _tls = threading.local()
+
+ def get_spawning_popen():
+     return getattr(_tls, 'spawning_popen', None)
+
+ def set_spawning_popen(popen):
+     _tls.spawning_popen = popen
+
+ def assert_spawning(obj):
+     if get_spawning_popen() is None:
+         raise RuntimeError(
+             '%s objects should only be shared between processes'
+             ' through inheritance' % type(obj).__name__
+             )
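
get_context() returns an isolated context object per start method, so a library can pick "spawn" or "fork" without touching the process-global default. A minimal sketch:

    import multiprocess as mp

    def work(q):
        q.put("done")

    if __name__ == "__main__":
        ctx = mp.get_context("spawn")   # isolated from the global default
        q = ctx.Queue()
        p = ctx.Process(target=work, args=(q,))
        p.start()
        print(q.get())                  # "done"
        p.join()
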
lib/python3.10/site-packages/multiprocess/dummy/__init__.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Support for the API of the multiprocessing package using threads
3
+ #
4
+ # multiprocessing/dummy/__init__.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = [
11
+ 'Process', 'current_process', 'active_children', 'freeze_support',
12
+ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
13
+ 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
14
+ ]
15
+
16
+ #
17
+ # Imports
18
+ #
19
+
20
+ import threading
21
+ import sys
22
+ import weakref
23
+ import array
24
+
25
+ from .connection import Pipe
26
+ from threading import Lock, RLock, Semaphore, BoundedSemaphore
27
+ from threading import Event, Condition, Barrier
28
+ from queue import Queue
29
+
30
+ #
31
+ #
32
+ #
33
+
34
+ class DummyProcess(threading.Thread):
35
+
36
+ def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
37
+ threading.Thread.__init__(self, group, target, name, args, kwargs)
38
+ self._pid = None
39
+ self._children = weakref.WeakKeyDictionary()
40
+ self._start_called = False
41
+ self._parent = current_process()
42
+
43
+ def start(self):
44
+ if self._parent is not current_process():
45
+ raise RuntimeError(
46
+ "Parent is {0!r} but current_process is {1!r}".format(
47
+ self._parent, current_process()))
48
+ self._start_called = True
49
+ if hasattr(self._parent, '_children'):
50
+ self._parent._children[self] = None
51
+ threading.Thread.start(self)
52
+
53
+ @property
54
+ def exitcode(self):
55
+ if self._start_called and not self.is_alive():
56
+ return 0
57
+ else:
58
+ return None
59
+
60
+ #
61
+ #
62
+ #
63
+
64
+ Process = DummyProcess
65
+ current_process = threading.current_thread
66
+ current_process()._children = weakref.WeakKeyDictionary()
67
+
68
+ def active_children():
69
+ children = current_process()._children
70
+ for p in list(children):
71
+ if not p.is_alive():
72
+ children.pop(p, None)
73
+ return list(children)
74
+
75
+ def freeze_support():
76
+ pass
77
+
78
+ #
79
+ #
80
+ #
81
+
82
+ class Namespace(object):
83
+ def __init__(self, /, **kwds):
84
+ self.__dict__.update(kwds)
85
+ def __repr__(self):
86
+ items = list(self.__dict__.items())
87
+ temp = []
88
+ for name, value in items:
89
+ if not name.startswith('_'):
90
+ temp.append('%s=%r' % (name, value))
91
+ temp.sort()
92
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
93
+
94
+ dict = dict
95
+ list = list
96
+
97
+ def Array(typecode, sequence, lock=True):
98
+ return array.array(typecode, sequence)
99
+
100
+ class Value(object):
101
+ def __init__(self, typecode, value, lock=True):
102
+ self._typecode = typecode
103
+ self._value = value
104
+
105
+ @property
106
+ def value(self):
107
+ return self._value
108
+
109
+ @value.setter
110
+ def value(self, value):
111
+ self._value = value
112
+
113
+ def __repr__(self):
114
+ return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
115
+
116
+ def Manager():
117
+ return sys.modules[__name__]
118
+
119
+ def shutdown():
120
+ pass
121
+
122
+ def Pool(processes=None, initializer=None, initargs=()):
123
+ from ..pool import ThreadPool
124
+ return ThreadPool(processes, initializer, initargs)
125
+
126
+ JoinableQueue = Queue
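
A minimal sketch of the thread-backed API this module provides, assuming it is importable as `multiprocess.dummy`: Pool here creates worker threads rather than processes, so arguments and callables never need to be picklable.

from multiprocess.dummy import Pool

def square(x):
    return x * x

with Pool(4) as pool:                    # four worker threads, not processes
    print(pool.map(square, range(5)))    # [0, 1, 4, 9, 16]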
lib/python3.10/site-packages/multiprocess/dummy/connection.py ADDED
@@ -0,0 +1,75 @@
1
+ #
2
+ # Analogue of `multiprocessing.connection` which uses queues instead of sockets
3
+ #
4
+ # multiprocessing/dummy/connection.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = [ 'Client', 'Listener', 'Pipe' ]
11
+
12
+ from queue import Queue
13
+
14
+
15
+ families = [None]
16
+
17
+
18
+ class Listener(object):
19
+
20
+ def __init__(self, address=None, family=None, backlog=1):
21
+ self._backlog_queue = Queue(backlog)
22
+
23
+ def accept(self):
24
+ return Connection(*self._backlog_queue.get())
25
+
26
+ def close(self):
27
+ self._backlog_queue = None
28
+
29
+ @property
30
+ def address(self):
31
+ return self._backlog_queue
32
+
33
+ def __enter__(self):
34
+ return self
35
+
36
+ def __exit__(self, exc_type, exc_value, exc_tb):
37
+ self.close()
38
+
39
+
40
+ def Client(address):
41
+ _in, _out = Queue(), Queue()
42
+ address.put((_out, _in))
43
+ return Connection(_in, _out)
44
+
45
+
46
+ def Pipe(duplex=True):
47
+ a, b = Queue(), Queue()
48
+ return Connection(a, b), Connection(b, a)
49
+
50
+
51
+ class Connection(object):
52
+
53
+ def __init__(self, _in, _out):
54
+ self._out = _out
55
+ self._in = _in
56
+ self.send = self.send_bytes = _out.put
57
+ self.recv = self.recv_bytes = _in.get
58
+
59
+ def poll(self, timeout=0.0):
60
+ if self._in.qsize() > 0:
61
+ return True
62
+ if timeout <= 0.0:
63
+ return False
64
+ with self._in.not_empty:
65
+ self._in.not_empty.wait(timeout)
66
+ return self._in.qsize() > 0
67
+
68
+ def close(self):
69
+ pass
70
+
71
+ def __enter__(self):
72
+ return self
73
+
74
+ def __exit__(self, exc_type, exc_value, exc_tb):
75
+ self.close()
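
A minimal sketch of the queue-backed endpoints defined above: Pipe() returns two Connection objects whose send()/recv() are just Queue.put and Queue.get, so code written against multiprocessing-style connections runs unchanged in-process.

from multiprocess.dummy.connection import Pipe

a, b = Pipe()
a.send('ping')                 # really Queue.put
assert b.poll(timeout=1.0)     # True: a message is waiting
print(b.recv())                # 'ping'  (really Queue.get)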
lib/python3.10/site-packages/multiprocess/forkserver.py ADDED
@@ -0,0 +1,347 @@
1
+ import errno
2
+ import os
3
+ import selectors
4
+ import signal
5
+ import socket
6
+ import struct
7
+ import sys
8
+ import threading
9
+ import warnings
10
+
11
+ from . import connection
12
+ from . import process
13
+ from .context import reduction
14
+ from . import resource_tracker
15
+ from . import spawn
16
+ from . import util
17
+
18
+ __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
19
+ 'set_forkserver_preload']
20
+
21
+ #
22
+ #
23
+ #
24
+
25
+ MAXFDS_TO_SEND = 256
26
+ SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t
27
+
28
+ #
29
+ # Forkserver class
30
+ #
31
+
32
+ class ForkServer(object):
33
+
34
+ def __init__(self):
35
+ self._forkserver_address = None
36
+ self._forkserver_alive_fd = None
37
+ self._forkserver_pid = None
38
+ self._inherited_fds = None
39
+ self._lock = threading.Lock()
40
+ self._preload_modules = ['__main__']
41
+
42
+ def _stop(self):
43
+ # Method used by unit tests to stop the server
44
+ with self._lock:
45
+ self._stop_unlocked()
46
+
47
+ def _stop_unlocked(self):
48
+ if self._forkserver_pid is None:
49
+ return
50
+
51
+ # closing the "alive" file descriptor asks the server to stop
52
+ os.close(self._forkserver_alive_fd)
53
+ self._forkserver_alive_fd = None
54
+
55
+ os.waitpid(self._forkserver_pid, 0)
56
+ self._forkserver_pid = None
57
+
58
+ if not util.is_abstract_socket_namespace(self._forkserver_address):
59
+ os.unlink(self._forkserver_address)
60
+ self._forkserver_address = None
61
+
62
+ def set_forkserver_preload(self, modules_names):
63
+ '''Set list of module names to try to load in forkserver process.'''
64
+ if not all(type(mod) is str for mod in modules_names):
65
+ raise TypeError('modules_names must be a list of strings')
66
+ self._preload_modules = modules_names
67
+
68
+ def get_inherited_fds(self):
69
+ '''Return list of fds inherited from parent process.
70
+
71
+ This returns None if the current process was not started by fork
72
+ server.
73
+ '''
74
+ return self._inherited_fds
75
+
76
+ def connect_to_new_process(self, fds):
77
+ '''Request forkserver to create a child process.
78
+
79
+ Returns a pair of fds (status_r, data_w). The calling process can read
80
+ the child process's pid and (eventually) its returncode from status_r.
81
+ The calling process should write to data_w the pickled preparation and
82
+ process data.
83
+ '''
84
+ self.ensure_running()
85
+ if len(fds) + 4 >= MAXFDS_TO_SEND:
86
+ raise ValueError('too many fds')
87
+ with socket.socket(socket.AF_UNIX) as client:
88
+ client.connect(self._forkserver_address)
89
+ parent_r, child_w = os.pipe()
90
+ child_r, parent_w = os.pipe()
91
+ allfds = [child_r, child_w, self._forkserver_alive_fd,
92
+ resource_tracker.getfd()]
93
+ allfds += fds
94
+ try:
95
+ reduction.sendfds(client, allfds)
96
+ return parent_r, parent_w
97
+ except:
98
+ os.close(parent_r)
99
+ os.close(parent_w)
100
+ raise
101
+ finally:
102
+ os.close(child_r)
103
+ os.close(child_w)
104
+
105
+ def ensure_running(self):
106
+ '''Make sure that a fork server is running.
107
+
108
+ This can be called from any process. Note that usually a child
109
+ process will just reuse the forkserver started by its parent, so
110
+ ensure_running() will do nothing.
111
+ '''
112
+ with self._lock:
113
+ resource_tracker.ensure_running()
114
+ if self._forkserver_pid is not None:
115
+ # forkserver was launched before, is it still running?
116
+ pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG)
117
+ if not pid:
118
+ # still alive
119
+ return
120
+ # dead, launch it again
121
+ os.close(self._forkserver_alive_fd)
122
+ self._forkserver_address = None
123
+ self._forkserver_alive_fd = None
124
+ self._forkserver_pid = None
125
+
126
+ cmd = ('from multiprocess.forkserver import main; ' +
127
+ 'main(%d, %d, %r, **%r)')
128
+
129
+ if self._preload_modules:
130
+ desired_keys = {'main_path', 'sys_path'}
131
+ data = spawn.get_preparation_data('ignore')
132
+ data = {x: y for x, y in data.items() if x in desired_keys}
133
+ else:
134
+ data = {}
135
+
136
+ with socket.socket(socket.AF_UNIX) as listener:
137
+ address = connection.arbitrary_address('AF_UNIX')
138
+ listener.bind(address)
139
+ if not util.is_abstract_socket_namespace(address):
140
+ os.chmod(address, 0o600)
141
+ listener.listen()
142
+
143
+ # all client processes own the write end of the "alive" pipe;
144
+ # when they all terminate the read end becomes ready.
145
+ alive_r, alive_w = os.pipe()
146
+ try:
147
+ fds_to_pass = [listener.fileno(), alive_r]
148
+ cmd %= (listener.fileno(), alive_r, self._preload_modules,
149
+ data)
150
+ exe = spawn.get_executable()
151
+ args = [exe] + util._args_from_interpreter_flags()
152
+ args += ['-c', cmd]
153
+ pid = util.spawnv_passfds(exe, args, fds_to_pass)
154
+ except:
155
+ os.close(alive_w)
156
+ raise
157
+ finally:
158
+ os.close(alive_r)
159
+ self._forkserver_address = address
160
+ self._forkserver_alive_fd = alive_w
161
+ self._forkserver_pid = pid
162
+
163
+ #
164
+ #
165
+ #
166
+
167
+ def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
168
+ '''Run forkserver.'''
169
+ if preload:
170
+ if '__main__' in preload and main_path is not None:
171
+ process.current_process()._inheriting = True
172
+ try:
173
+ spawn.import_main_path(main_path)
174
+ finally:
175
+ del process.current_process()._inheriting
176
+ for modname in preload:
177
+ try:
178
+ __import__(modname)
179
+ except ImportError:
180
+ pass
181
+
182
+ util._close_stdin()
183
+
184
+ sig_r, sig_w = os.pipe()
185
+ os.set_blocking(sig_r, False)
186
+ os.set_blocking(sig_w, False)
187
+
188
+ def sigchld_handler(*_unused):
189
+ # Dummy signal handler, doesn't do anything
190
+ pass
191
+
192
+ handlers = {
193
+ # unblocking SIGCHLD allows the wakeup fd to notify our event loop
194
+ signal.SIGCHLD: sigchld_handler,
195
+ # protect the process from ^C
196
+ signal.SIGINT: signal.SIG_IGN,
197
+ }
198
+ old_handlers = {sig: signal.signal(sig, val)
199
+ for (sig, val) in handlers.items()}
200
+
201
+ # calling os.write() in the Python signal handler is racy
202
+ signal.set_wakeup_fd(sig_w)
203
+
204
+ # map child pids to client fds
205
+ pid_to_fd = {}
206
+
207
+ with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
208
+ selectors.DefaultSelector() as selector:
209
+ _forkserver._forkserver_address = listener.getsockname()
210
+
211
+ selector.register(listener, selectors.EVENT_READ)
212
+ selector.register(alive_r, selectors.EVENT_READ)
213
+ selector.register(sig_r, selectors.EVENT_READ)
214
+
215
+ while True:
216
+ try:
217
+ while True:
218
+ rfds = [key.fileobj for (key, events) in selector.select()]
219
+ if rfds:
220
+ break
221
+
222
+ if alive_r in rfds:
223
+ # EOF because no more client processes left
224
+ assert os.read(alive_r, 1) == b'', "Not at EOF?"
225
+ raise SystemExit
226
+
227
+ if sig_r in rfds:
228
+ # Got SIGCHLD
229
+ os.read(sig_r, 65536) # exhaust
230
+ while True:
231
+ # Scan for child processes
232
+ try:
233
+ pid, sts = os.waitpid(-1, os.WNOHANG)
234
+ except ChildProcessError:
235
+ break
236
+ if pid == 0:
237
+ break
238
+ child_w = pid_to_fd.pop(pid, None)
239
+ if child_w is not None:
240
+ returncode = os.waitstatus_to_exitcode(sts)
241
+ # Send exit code to client process
242
+ try:
243
+ write_signed(child_w, returncode)
244
+ except BrokenPipeError:
245
+ # client vanished
246
+ pass
247
+ os.close(child_w)
248
+ else:
249
+ # This shouldn't happen really
250
+ warnings.warn('forkserver: waitpid returned '
251
+ 'unexpected pid %d' % pid)
252
+
253
+ if listener in rfds:
254
+ # Incoming fork request
255
+ with listener.accept()[0] as s:
256
+ # Receive fds from client
257
+ fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
258
+ if len(fds) > MAXFDS_TO_SEND:
259
+ raise RuntimeError(
260
+ "Too many ({0:n}) fds to send".format(
261
+ len(fds)))
262
+ child_r, child_w, *fds = fds
263
+ s.close()
264
+ pid = os.fork()
265
+ if pid == 0:
266
+ # Child
267
+ code = 1
268
+ try:
269
+ listener.close()
270
+ selector.close()
271
+ unused_fds = [alive_r, child_w, sig_r, sig_w]
272
+ unused_fds.extend(pid_to_fd.values())
273
+ code = _serve_one(child_r, fds,
274
+ unused_fds,
275
+ old_handlers)
276
+ except Exception:
277
+ sys.excepthook(*sys.exc_info())
278
+ sys.stderr.flush()
279
+ finally:
280
+ os._exit(code)
281
+ else:
282
+ # Send pid to client process
283
+ try:
284
+ write_signed(child_w, pid)
285
+ except BrokenPipeError:
286
+ # client vanished
287
+ pass
288
+ pid_to_fd[pid] = child_w
289
+ os.close(child_r)
290
+ for fd in fds:
291
+ os.close(fd)
292
+
293
+ except OSError as e:
294
+ if e.errno != errno.ECONNABORTED:
295
+ raise
296
+
297
+
298
+ def _serve_one(child_r, fds, unused_fds, handlers):
299
+ # close unnecessary stuff and reset signal handlers
300
+ signal.set_wakeup_fd(-1)
301
+ for sig, val in handlers.items():
302
+ signal.signal(sig, val)
303
+ for fd in unused_fds:
304
+ os.close(fd)
305
+
306
+ (_forkserver._forkserver_alive_fd,
307
+ resource_tracker._resource_tracker._fd,
308
+ *_forkserver._inherited_fds) = fds
309
+
310
+ # Run process object received over pipe
311
+ parent_sentinel = os.dup(child_r)
312
+ code = spawn._main(child_r, parent_sentinel)
313
+
314
+ return code
315
+
316
+
317
+ #
318
+ # Read and write signed numbers
319
+ #
320
+
321
+ def read_signed(fd):
322
+ data = b''
323
+ length = SIGNED_STRUCT.size
324
+ while len(data) < length:
325
+ s = os.read(fd, length - len(data))
326
+ if not s:
327
+ raise EOFError('unexpected EOF')
328
+ data += s
329
+ return SIGNED_STRUCT.unpack(data)[0]
330
+
331
+ def write_signed(fd, n):
332
+ msg = SIGNED_STRUCT.pack(n)
333
+ while msg:
334
+ nbytes = os.write(fd, msg)
335
+ if nbytes == 0:
336
+ raise RuntimeError('should not get here')
337
+ msg = msg[nbytes:]
338
+
339
+ #
340
+ #
341
+ #
342
+
343
+ _forkserver = ForkServer()
344
+ ensure_running = _forkserver.ensure_running
345
+ get_inherited_fds = _forkserver.get_inherited_fds
346
+ connect_to_new_process = _forkserver.connect_to_new_process
347
+ set_forkserver_preload = _forkserver.set_forkserver_preload
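
A minimal sketch exercising the signed-integer framing helpers above over an ordinary pipe; the forkserver and its clients use the same read_signed()/write_signed() pair to exchange pids and exit codes.

import os
from multiprocess.forkserver import read_signed, write_signed

r, w = os.pipe()
write_signed(w, -42)           # fixed-size 'q' struct, so negative values survive
assert read_signed(r) == -42
os.close(r)
os.close(w)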
lib/python3.10/site-packages/multiprocess/heap.py ADDED
@@ -0,0 +1,337 @@
1
+ #
2
+ # Module which supports allocation of memory from an mmap
3
+ #
4
+ # multiprocessing/heap.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ import bisect
11
+ from collections import defaultdict
12
+ import mmap
13
+ import os
14
+ import sys
15
+ import tempfile
16
+ import threading
17
+
18
+ from .context import reduction, assert_spawning
19
+ from . import util
20
+
21
+ __all__ = ['BufferWrapper']
22
+
23
+ #
24
+ # Inheritable class which wraps an mmap, and from which blocks can be allocated
25
+ #
26
+
27
+ if sys.platform == 'win32':
28
+
29
+ import _winapi
30
+
31
+ class Arena(object):
32
+ """
33
+ A shared memory area backed by anonymous memory (Windows).
34
+ """
35
+
36
+ _rand = tempfile._RandomNameSequence()
37
+
38
+ def __init__(self, size):
39
+ self.size = size
40
+ for i in range(100):
41
+ name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
42
+ buf = mmap.mmap(-1, size, tagname=name)
43
+ if _winapi.GetLastError() == 0:
44
+ break
45
+ # We have reopened a preexisting mmap.
46
+ buf.close()
47
+ else:
48
+ raise FileExistsError('Cannot find name for new mmap')
49
+ self.name = name
50
+ self.buffer = buf
51
+ self._state = (self.size, self.name)
52
+
53
+ def __getstate__(self):
54
+ assert_spawning(self)
55
+ return self._state
56
+
57
+ def __setstate__(self, state):
58
+ self.size, self.name = self._state = state
59
+ # Reopen existing mmap
60
+ self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
61
+ # XXX Temporarily preventing buildbot failures while determining
62
+ # XXX the correct long-term fix. See issue 23060
63
+ #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS
64
+
65
+ else:
66
+
67
+ class Arena(object):
68
+ """
69
+ A shared memory area backed by a temporary file (POSIX).
70
+ """
71
+
72
+ if sys.platform == 'linux':
73
+ _dir_candidates = ['/dev/shm']
74
+ else:
75
+ _dir_candidates = []
76
+
77
+ def __init__(self, size, fd=-1):
78
+ self.size = size
79
+ self.fd = fd
80
+ if fd == -1:
81
+ # Arena is created anew (if fd != -1, it means we're coming
82
+ # from rebuild_arena() below)
83
+ self.fd, name = tempfile.mkstemp(
84
+ prefix='pym-%d-'%os.getpid(),
85
+ dir=self._choose_dir(size))
86
+ os.unlink(name)
87
+ util.Finalize(self, os.close, (self.fd,))
88
+ os.ftruncate(self.fd, size)
89
+ self.buffer = mmap.mmap(self.fd, self.size)
90
+
91
+ def _choose_dir(self, size):
92
+ # Choose a non-storage backed directory if possible,
93
+ # to improve performance
94
+ for d in self._dir_candidates:
95
+ st = os.statvfs(d)
96
+ if st.f_bavail * st.f_frsize >= size: # enough free space?
97
+ return d
98
+ return util.get_temp_dir()
99
+
100
+ def reduce_arena(a):
101
+ if a.fd == -1:
102
+ raise ValueError('Arena is unpicklable because '
103
+ 'forking was enabled when it was created')
104
+ return rebuild_arena, (a.size, reduction.DupFd(a.fd))
105
+
106
+ def rebuild_arena(size, dupfd):
107
+ return Arena(size, dupfd.detach())
108
+
109
+ reduction.register(Arena, reduce_arena)
110
+
111
+ #
112
+ # Class allowing allocation of chunks of memory from arenas
113
+ #
114
+
115
+ class Heap(object):
116
+
117
+ # Minimum malloc() alignment
118
+ _alignment = 8
119
+
120
+ _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB
121
+ _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2
122
+
123
+ def __init__(self, size=mmap.PAGESIZE):
124
+ self._lastpid = os.getpid()
125
+ self._lock = threading.Lock()
126
+ # Current arena allocation size
127
+ self._size = size
128
+ # A sorted list of available block sizes in arenas
129
+ self._lengths = []
130
+
131
+ # Free block management:
132
+ # - map each block size to a list of `(Arena, start, stop)` blocks
133
+ self._len_to_seq = {}
134
+ # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block
135
+ # starting at that offset
136
+ self._start_to_block = {}
137
+ # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block
138
+ # ending at that offset
139
+ self._stop_to_block = {}
140
+
141
+ # Map arenas to their `(Arena, start, stop)` blocks in use
142
+ self._allocated_blocks = defaultdict(set)
143
+ self._arenas = []
144
+
145
+ # List of pending blocks to free - see comment in free() below
146
+ self._pending_free_blocks = []
147
+
148
+ # Statistics
149
+ self._n_mallocs = 0
150
+ self._n_frees = 0
151
+
152
+ @staticmethod
153
+ def _roundup(n, alignment):
154
+ # alignment must be a power of 2
155
+ mask = alignment - 1
156
+ return (n + mask) & ~mask
157
+
158
+ def _new_arena(self, size):
159
+ # Create a new arena with at least the given *size*
160
+ length = self._roundup(max(self._size, size), mmap.PAGESIZE)
161
+ # We carve larger and larger arenas, for efficiency, until we
162
+ # reach a large-ish size (roughly L3 cache-sized)
163
+ if self._size < self._DOUBLE_ARENA_SIZE_UNTIL:
164
+ self._size *= 2
165
+ util.info('allocating a new mmap of length %d', length)
166
+ arena = Arena(length)
167
+ self._arenas.append(arena)
168
+ return (arena, 0, length)
169
+
170
+ def _discard_arena(self, arena):
171
+ # Possibly delete the given (unused) arena
172
+ length = arena.size
173
+ # Reusing an existing arena is faster than creating a new one, so
174
+ # we only reclaim space if it's large enough.
175
+ if length < self._DISCARD_FREE_SPACE_LARGER_THAN:
176
+ return
177
+ blocks = self._allocated_blocks.pop(arena)
178
+ assert not blocks
179
+ del self._start_to_block[(arena, 0)]
180
+ del self._stop_to_block[(arena, length)]
181
+ self._arenas.remove(arena)
182
+ seq = self._len_to_seq[length]
183
+ seq.remove((arena, 0, length))
184
+ if not seq:
185
+ del self._len_to_seq[length]
186
+ self._lengths.remove(length)
187
+
188
+ def _malloc(self, size):
189
+ # returns a large enough block -- it might be much larger
190
+ i = bisect.bisect_left(self._lengths, size)
191
+ if i == len(self._lengths):
192
+ return self._new_arena(size)
193
+ else:
194
+ length = self._lengths[i]
195
+ seq = self._len_to_seq[length]
196
+ block = seq.pop()
197
+ if not seq:
198
+ del self._len_to_seq[length], self._lengths[i]
199
+
200
+ (arena, start, stop) = block
201
+ del self._start_to_block[(arena, start)]
202
+ del self._stop_to_block[(arena, stop)]
203
+ return block
204
+
205
+ def _add_free_block(self, block):
206
+ # make block available and try to merge with its neighbours in the arena
207
+ (arena, start, stop) = block
208
+
209
+ try:
210
+ prev_block = self._stop_to_block[(arena, start)]
211
+ except KeyError:
212
+ pass
213
+ else:
214
+ start, _ = self._absorb(prev_block)
215
+
216
+ try:
217
+ next_block = self._start_to_block[(arena, stop)]
218
+ except KeyError:
219
+ pass
220
+ else:
221
+ _, stop = self._absorb(next_block)
222
+
223
+ block = (arena, start, stop)
224
+ length = stop - start
225
+
226
+ try:
227
+ self._len_to_seq[length].append(block)
228
+ except KeyError:
229
+ self._len_to_seq[length] = [block]
230
+ bisect.insort(self._lengths, length)
231
+
232
+ self._start_to_block[(arena, start)] = block
233
+ self._stop_to_block[(arena, stop)] = block
234
+
235
+ def _absorb(self, block):
236
+ # deregister this block so it can be merged with a neighbour
237
+ (arena, start, stop) = block
238
+ del self._start_to_block[(arena, start)]
239
+ del self._stop_to_block[(arena, stop)]
240
+
241
+ length = stop - start
242
+ seq = self._len_to_seq[length]
243
+ seq.remove(block)
244
+ if not seq:
245
+ del self._len_to_seq[length]
246
+ self._lengths.remove(length)
247
+
248
+ return start, stop
249
+
250
+ def _remove_allocated_block(self, block):
251
+ arena, start, stop = block
252
+ blocks = self._allocated_blocks[arena]
253
+ blocks.remove((start, stop))
254
+ if not blocks:
255
+ # Arena is entirely free, discard it from this process
256
+ self._discard_arena(arena)
257
+
258
+ def _free_pending_blocks(self):
259
+ # Free all the blocks in the pending list - called with the lock held.
260
+ while True:
261
+ try:
262
+ block = self._pending_free_blocks.pop()
263
+ except IndexError:
264
+ break
265
+ self._add_free_block(block)
266
+ self._remove_allocated_block(block)
267
+
268
+ def free(self, block):
269
+ # free a block returned by malloc()
270
+ # Since free() can be called asynchronously by the GC, it could happen
271
+ # that it's called while self._lock is held: in that case,
272
+ # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
273
+ # trylock is used instead, and if the lock can't be acquired
274
+ # immediately, the block is added to a list of blocks to be freed
275
+ # synchronously sometimes later from malloc() or free(), by calling
276
+ # _free_pending_blocks() (appending and retrieving from a list is not
277
+ # strictly thread-safe but under CPython it's atomic thanks to the GIL).
278
+ if os.getpid() != self._lastpid:
279
+ raise ValueError(
280
+ "My pid ({0:n}) is not last pid {1:n}".format(
281
+ os.getpid(),self._lastpid))
282
+ if not self._lock.acquire(False):
283
+ # can't acquire the lock right now, add the block to the list of
284
+ # pending blocks to free
285
+ self._pending_free_blocks.append(block)
286
+ else:
287
+ # we hold the lock
288
+ try:
289
+ self._n_frees += 1
290
+ self._free_pending_blocks()
291
+ self._add_free_block(block)
292
+ self._remove_allocated_block(block)
293
+ finally:
294
+ self._lock.release()
295
+
296
+ def malloc(self, size):
297
+ # return a block of right size (possibly rounded up)
298
+ if size < 0:
299
+ raise ValueError("Size {0:n} out of range".format(size))
300
+ if sys.maxsize <= size:
301
+ raise OverflowError("Size {0:n} too large".format(size))
302
+ if os.getpid() != self._lastpid:
303
+ self.__init__() # reinitialize after fork
304
+ with self._lock:
305
+ self._n_mallocs += 1
306
+ # allow pending blocks to be marked available
307
+ self._free_pending_blocks()
308
+ size = self._roundup(max(size, 1), self._alignment)
309
+ (arena, start, stop) = self._malloc(size)
310
+ real_stop = start + size
311
+ if real_stop < stop:
312
+ # if the returned block is larger than necessary, mark
313
+ # the remainder available
314
+ self._add_free_block((arena, real_stop, stop))
315
+ self._allocated_blocks[arena].add((start, real_stop))
316
+ return (arena, start, real_stop)
317
+
318
+ #
319
+ # Class wrapping a block allocated out of a Heap -- can be inherited by child process
320
+ #
321
+
322
+ class BufferWrapper(object):
323
+
324
+ _heap = Heap()
325
+
326
+ def __init__(self, size):
327
+ if size < 0:
328
+ raise ValueError("Size {0:n} out of range".format(size))
329
+ if sys.maxsize <= size:
330
+ raise OverflowError("Size {0:n} too large".format(size))
331
+ block = BufferWrapper._heap.malloc(size)
332
+ self._state = (block, size)
333
+ util.Finalize(self, BufferWrapper._heap.free, args=(block,))
334
+
335
+ def create_memoryview(self):
336
+ (arena, start, stop), size = self._state
337
+ return memoryview(arena.buffer)[start:start+size]
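
A minimal sketch of BufferWrapper, the module's one public name: it carves a block out of the shared mmap-backed heap and exposes it as a writable memoryview, which is how shared ctypes values get their backing store.

from multiprocess.heap import BufferWrapper

wrapper = BufferWrapper(16)        # allocate 16 bytes from the shared heap
view = wrapper.create_memoryview()
view[:5] = b'hello'
print(bytes(view[:5]))             # b'hello'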
lib/python3.10/site-packages/multiprocess/managers.py ADDED
@@ -0,0 +1,1369 @@
1
+ #
2
+ # Module providing manager classes for dealing
3
+ # with shared objects
4
+ #
5
+ # multiprocessing/managers.py
6
+ #
7
+ # Copyright (c) 2006-2008, R Oudkerk
8
+ # Licensed to PSF under a Contributor Agreement.
9
+ #
10
+
11
+ __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token',
12
+ 'SharedMemoryManager' ]
13
+
14
+ #
15
+ # Imports
16
+ #
17
+
18
+ import sys
19
+ import threading
20
+ import signal
21
+ import array
22
+ import queue
23
+ import time
24
+ import types
25
+ import os
26
+ from os import getpid
27
+
28
+ from traceback import format_exc
29
+
30
+ from . import connection
31
+ from .context import reduction, get_spawning_popen, ProcessError
32
+ from . import pool
33
+ from . import process
34
+ from . import util
35
+ from . import get_context
36
+ try:
37
+ from . import shared_memory
38
+ HAS_SHMEM = True
39
+ except ImportError:
40
+ HAS_SHMEM = False
41
+
42
+ #
43
+ # Register some things for pickling
44
+ #
45
+
46
+ def reduce_array(a):
47
+ return array.array, (a.typecode, a.tobytes())
48
+ reduction.register(array.array, reduce_array)
49
+
50
+ view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
51
+ if view_types[0] is not list: # only needed in Py3.0
52
+ def rebuild_as_list(obj):
53
+ return list, (list(obj),)
54
+ for view_type in view_types:
55
+ reduction.register(view_type, rebuild_as_list)
56
+
57
+ #
58
+ # Type for identifying shared objects
59
+ #
60
+
61
+ class Token(object):
62
+ '''
63
+ Type to uniquely identify a shared object
64
+ '''
65
+ __slots__ = ('typeid', 'address', 'id')
66
+
67
+ def __init__(self, typeid, address, id):
68
+ (self.typeid, self.address, self.id) = (typeid, address, id)
69
+
70
+ def __getstate__(self):
71
+ return (self.typeid, self.address, self.id)
72
+
73
+ def __setstate__(self, state):
74
+ (self.typeid, self.address, self.id) = state
75
+
76
+ def __repr__(self):
77
+ return '%s(typeid=%r, address=%r, id=%r)' % \
78
+ (self.__class__.__name__, self.typeid, self.address, self.id)
79
+
80
+ #
81
+ # Function for communication with a manager's server process
82
+ #
83
+
84
+ def dispatch(c, id, methodname, args=(), kwds={}):
85
+ '''
86
+ Send a message to manager using connection `c` and return response
87
+ '''
88
+ c.send((id, methodname, args, kwds))
89
+ kind, result = c.recv()
90
+ if kind == '#RETURN':
91
+ return result
92
+ raise convert_to_error(kind, result)
93
+
94
+ def convert_to_error(kind, result):
95
+ if kind == '#ERROR':
96
+ return result
97
+ elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
98
+ if not isinstance(result, str):
99
+ raise TypeError(
100
+ "Result {0!r} (kind '{1}') type is {2}, not str".format(
101
+ result, kind, type(result)))
102
+ if kind == '#UNSERIALIZABLE':
103
+ return RemoteError('Unserializable message: %s\n' % result)
104
+ else:
105
+ return RemoteError(result)
106
+ else:
107
+ return ValueError('Unrecognized message type {!r}'.format(kind))
108
+
109
+ class RemoteError(Exception):
110
+ def __str__(self):
111
+ return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
112
+
113
+ #
114
+ # Functions for finding the method names of an object
115
+ #
116
+
117
+ def all_methods(obj):
118
+ '''
119
+ Return a list of names of methods of `obj`
120
+ '''
121
+ temp = []
122
+ for name in dir(obj):
123
+ func = getattr(obj, name)
124
+ if callable(func):
125
+ temp.append(name)
126
+ return temp
127
+
128
+ def public_methods(obj):
129
+ '''
130
+ Return a list of names of methods of `obj` which do not start with '_'
131
+ '''
132
+ return [name for name in all_methods(obj) if name[0] != '_']
133
+
134
+ #
135
+ # Server which is run in a process controlled by a manager
136
+ #
137
+
138
+ class Server(object):
139
+ '''
140
+ Server class which runs in a process controlled by a manager object
141
+ '''
142
+ public = ['shutdown', 'create', 'accept_connection', 'get_methods',
143
+ 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
144
+
145
+ def __init__(self, registry, address, authkey, serializer):
146
+ if not isinstance(authkey, bytes):
147
+ raise TypeError(
148
+ "Authkey {0!r} is type {1!s}, not bytes".format(
149
+ authkey, type(authkey)))
150
+ self.registry = registry
151
+ self.authkey = process.AuthenticationString(authkey)
152
+ Listener, Client = listener_client[serializer]
153
+
154
+ # do authentication later
155
+ self.listener = Listener(address=address, backlog=16)
156
+ self.address = self.listener.address
157
+
158
+ self.id_to_obj = {'0': (None, ())}
159
+ self.id_to_refcount = {}
160
+ self.id_to_local_proxy_obj = {}
161
+ self.mutex = threading.Lock()
162
+
163
+ def serve_forever(self):
164
+ '''
165
+ Run the server forever
166
+ '''
167
+ self.stop_event = threading.Event()
168
+ process.current_process()._manager_server = self
169
+ try:
170
+ accepter = threading.Thread(target=self.accepter)
171
+ accepter.daemon = True
172
+ accepter.start()
173
+ try:
174
+ while not self.stop_event.is_set():
175
+ self.stop_event.wait(1)
176
+ except (KeyboardInterrupt, SystemExit):
177
+ pass
178
+ finally:
179
+ if sys.stdout != sys.__stdout__: # what about stderr?
180
+ util.debug('resetting stdout, stderr')
181
+ sys.stdout = sys.__stdout__
182
+ sys.stderr = sys.__stderr__
183
+ sys.exit(0)
184
+
185
+ def accepter(self):
186
+ while True:
187
+ try:
188
+ c = self.listener.accept()
189
+ except OSError:
190
+ continue
191
+ t = threading.Thread(target=self.handle_request, args=(c,))
192
+ t.daemon = True
193
+ t.start()
194
+
195
+ def handle_request(self, c):
196
+ '''
197
+ Handle a new connection
198
+ '''
199
+ funcname = result = request = None
200
+ try:
201
+ connection.deliver_challenge(c, self.authkey)
202
+ connection.answer_challenge(c, self.authkey)
203
+ request = c.recv()
204
+ ignore, funcname, args, kwds = request
205
+ assert funcname in self.public, '%r unrecognized' % funcname
206
+ func = getattr(self, funcname)
207
+ except Exception:
208
+ msg = ('#TRACEBACK', format_exc())
209
+ else:
210
+ try:
211
+ result = func(c, *args, **kwds)
212
+ except Exception:
213
+ msg = ('#TRACEBACK', format_exc())
214
+ else:
215
+ msg = ('#RETURN', result)
216
+ try:
217
+ c.send(msg)
218
+ except Exception as e:
219
+ try:
220
+ c.send(('#TRACEBACK', format_exc()))
221
+ except Exception:
222
+ pass
223
+ util.info('Failure to send message: %r', msg)
224
+ util.info(' ... request was %r', request)
225
+ util.info(' ... exception was %r', e)
226
+
227
+ c.close()
228
+
229
+ def serve_client(self, conn):
230
+ '''
231
+ Handle requests from the proxies in a particular process/thread
232
+ '''
233
+ util.debug('starting server thread to service %r',
234
+ threading.current_thread().name)
235
+
236
+ recv = conn.recv
237
+ send = conn.send
238
+ id_to_obj = self.id_to_obj
239
+
240
+ while not self.stop_event.is_set():
241
+
242
+ try:
243
+ methodname = obj = None
244
+ request = recv()
245
+ ident, methodname, args, kwds = request
246
+ try:
247
+ obj, exposed, gettypeid = id_to_obj[ident]
248
+ except KeyError as ke:
249
+ try:
250
+ obj, exposed, gettypeid = \
251
+ self.id_to_local_proxy_obj[ident]
252
+ except KeyError:
253
+ raise ke
254
+
255
+ if methodname not in exposed:
256
+ raise AttributeError(
257
+ 'method %r of %r object is not in exposed=%r' %
258
+ (methodname, type(obj), exposed)
259
+ )
260
+
261
+ function = getattr(obj, methodname)
262
+
263
+ try:
264
+ res = function(*args, **kwds)
265
+ except Exception as e:
266
+ msg = ('#ERROR', e)
267
+ else:
268
+ typeid = gettypeid and gettypeid.get(methodname, None)
269
+ if typeid:
270
+ rident, rexposed = self.create(conn, typeid, res)
271
+ token = Token(typeid, self.address, rident)
272
+ msg = ('#PROXY', (rexposed, token))
273
+ else:
274
+ msg = ('#RETURN', res)
275
+
276
+ except AttributeError:
277
+ if methodname is None:
278
+ msg = ('#TRACEBACK', format_exc())
279
+ else:
280
+ try:
281
+ fallback_func = self.fallback_mapping[methodname]
282
+ result = fallback_func(
283
+ self, conn, ident, obj, *args, **kwds
284
+ )
285
+ msg = ('#RETURN', result)
286
+ except Exception:
287
+ msg = ('#TRACEBACK', format_exc())
288
+
289
+ except EOFError:
290
+ util.debug('got EOF -- exiting thread serving %r',
291
+ threading.current_thread().name)
292
+ sys.exit(0)
293
+
294
+ except Exception:
295
+ msg = ('#TRACEBACK', format_exc())
296
+
297
+ try:
298
+ try:
299
+ send(msg)
300
+ except Exception:
301
+ send(('#UNSERIALIZABLE', format_exc()))
302
+ except Exception as e:
303
+ util.info('exception in thread serving %r',
304
+ threading.current_thread().name)
305
+ util.info(' ... message was %r', msg)
306
+ util.info(' ... exception was %r', e)
307
+ conn.close()
308
+ sys.exit(1)
309
+
310
+ def fallback_getvalue(self, conn, ident, obj):
311
+ return obj
312
+
313
+ def fallback_str(self, conn, ident, obj):
314
+ return str(obj)
315
+
316
+ def fallback_repr(self, conn, ident, obj):
317
+ return repr(obj)
318
+
319
+ fallback_mapping = {
320
+ '__str__':fallback_str,
321
+ '__repr__':fallback_repr,
322
+ '#GETVALUE':fallback_getvalue
323
+ }
324
+
325
+ def dummy(self, c):
326
+ pass
327
+
328
+ def debug_info(self, c):
329
+ '''
330
+ Return some info --- useful to spot problems with refcounting
331
+ '''
332
+ # Perhaps include debug info about 'c'?
333
+ with self.mutex:
334
+ result = []
335
+ keys = list(self.id_to_refcount.keys())
336
+ keys.sort()
337
+ for ident in keys:
338
+ if ident != '0':
339
+ result.append(' %s: refcount=%s\n %s' %
340
+ (ident, self.id_to_refcount[ident],
341
+ str(self.id_to_obj[ident][0])[:75]))
342
+ return '\n'.join(result)
343
+
344
+ def number_of_objects(self, c):
345
+ '''
346
+ Number of shared objects
347
+ '''
348
+ # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
349
+ return len(self.id_to_refcount)
350
+
351
+ def shutdown(self, c):
352
+ '''
353
+ Shutdown this process
354
+ '''
355
+ try:
356
+ util.debug('manager received shutdown message')
357
+ c.send(('#RETURN', None))
358
+ except:
359
+ import traceback
360
+ traceback.print_exc()
361
+ finally:
362
+ self.stop_event.set()
363
+
364
+ def create(self, c, typeid, /, *args, **kwds):
365
+ '''
366
+ Create a new shared object and return its id
367
+ '''
368
+ with self.mutex:
369
+ callable, exposed, method_to_typeid, proxytype = \
370
+ self.registry[typeid]
371
+
372
+ if callable is None:
373
+ if kwds or (len(args) != 1):
374
+ raise ValueError(
375
+ "Without callable, must have one non-keyword argument")
376
+ obj = args[0]
377
+ else:
378
+ obj = callable(*args, **kwds)
379
+
380
+ if exposed is None:
381
+ exposed = public_methods(obj)
382
+ if method_to_typeid is not None:
383
+ if not isinstance(method_to_typeid, dict):
384
+ raise TypeError(
385
+ "Method_to_typeid {0!r}: type {1!s}, not dict".format(
386
+ method_to_typeid, type(method_to_typeid)))
387
+ exposed = list(exposed) + list(method_to_typeid)
388
+
389
+ ident = '%x' % id(obj) # convert to string because xmlrpclib
390
+ # only has 32 bit signed integers
391
+ util.debug('%r callable returned object with id %r', typeid, ident)
392
+
393
+ self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
394
+ if ident not in self.id_to_refcount:
395
+ self.id_to_refcount[ident] = 0
396
+
397
+ self.incref(c, ident)
398
+ return ident, tuple(exposed)
399
+
400
+ def get_methods(self, c, token):
401
+ '''
402
+ Return the methods of the shared object indicated by token
403
+ '''
404
+ return tuple(self.id_to_obj[token.id][1])
405
+
406
+ def accept_connection(self, c, name):
407
+ '''
408
+ Spawn a new thread to serve this connection
409
+ '''
410
+ threading.current_thread().name = name
411
+ c.send(('#RETURN', None))
412
+ self.serve_client(c)
413
+
414
+ def incref(self, c, ident):
415
+ with self.mutex:
416
+ try:
417
+ self.id_to_refcount[ident] += 1
418
+ except KeyError as ke:
419
+ # If no external references exist but an internal (to the
420
+ # manager) still does and a new external reference is created
421
+ # from it, restore the manager's tracking of it from the
422
+ # previously stashed internal ref.
423
+ if ident in self.id_to_local_proxy_obj:
424
+ self.id_to_refcount[ident] = 1
425
+ self.id_to_obj[ident] = \
426
+ self.id_to_local_proxy_obj[ident]
427
+ obj, exposed, gettypeid = self.id_to_obj[ident]
428
+ util.debug('Server re-enabled tracking & INCREF %r', ident)
429
+ else:
430
+ raise ke
431
+
432
+ def decref(self, c, ident):
433
+ if ident not in self.id_to_refcount and \
434
+ ident in self.id_to_local_proxy_obj:
435
+ util.debug('Server DECREF skipping %r', ident)
436
+ return
437
+
438
+ with self.mutex:
439
+ if self.id_to_refcount[ident] <= 0:
440
+ raise AssertionError(
441
+ "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
442
+ ident, self.id_to_obj[ident],
443
+ self.id_to_refcount[ident]))
444
+ self.id_to_refcount[ident] -= 1
445
+ if self.id_to_refcount[ident] == 0:
446
+ del self.id_to_refcount[ident]
447
+
448
+ if ident not in self.id_to_refcount:
449
+ # Two-step process in case the object turns out to contain other
450
+ # proxy objects (e.g. a managed list of managed lists).
451
+ # Otherwise, deleting self.id_to_obj[ident] would trigger the
452
+ # deleting of the stored value (another managed object) which would
453
+ # in turn attempt to acquire the mutex that is already held here.
454
+ self.id_to_obj[ident] = (None, (), None) # thread-safe
455
+ util.debug('disposing of obj with id %r', ident)
456
+ with self.mutex:
457
+ del self.id_to_obj[ident]
458
+
459
+
460
+ #
461
+ # Class to represent state of a manager
462
+ #
463
+
464
+ class State(object):
465
+ __slots__ = ['value']
466
+ INITIAL = 0
467
+ STARTED = 1
468
+ SHUTDOWN = 2
469
+
470
+ #
471
+ # Mapping from serializer name to Listener and Client types
472
+ #
473
+
474
+ listener_client = { #XXX: register dill?
475
+ 'pickle' : (connection.Listener, connection.Client),
476
+ 'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
477
+ }
478
+
479
+ #
480
+ # Definition of BaseManager
481
+ #
482
+
483
+ class BaseManager(object):
484
+ '''
485
+ Base class for managers
486
+ '''
487
+ _registry = {}
488
+ _Server = Server
489
+
490
+ def __init__(self, address=None, authkey=None, serializer='pickle',
491
+ ctx=None):
492
+ if authkey is None:
493
+ authkey = process.current_process().authkey
494
+ self._address = address # XXX not final address if eg ('', 0)
495
+ self._authkey = process.AuthenticationString(authkey)
496
+ self._state = State()
497
+ self._state.value = State.INITIAL
498
+ self._serializer = serializer
499
+ self._Listener, self._Client = listener_client[serializer]
500
+ self._ctx = ctx or get_context()
501
+
502
+ def get_server(self):
503
+ '''
504
+ Return server object with serve_forever() method and address attribute
505
+ '''
506
+ if self._state.value != State.INITIAL:
507
+ if self._state.value == State.STARTED:
508
+ raise ProcessError("Already started server")
509
+ elif self._state.value == State.SHUTDOWN:
510
+ raise ProcessError("Manager has shut down")
511
+ else:
512
+ raise ProcessError(
513
+ "Unknown state {!r}".format(self._state.value))
514
+ return Server(self._registry, self._address,
515
+ self._authkey, self._serializer)
516
+
517
+ def connect(self):
518
+ '''
519
+ Connect manager object to the server process
520
+ '''
521
+ Listener, Client = listener_client[self._serializer]
522
+ conn = Client(self._address, authkey=self._authkey)
523
+ dispatch(conn, None, 'dummy')
524
+ self._state.value = State.STARTED
525
+
526
+ def start(self, initializer=None, initargs=()):
527
+ '''
528
+ Spawn a server process for this manager object
529
+ '''
530
+ if self._state.value != State.INITIAL:
531
+ if self._state.value == State.STARTED:
532
+ raise ProcessError("Already started server")
533
+ elif self._state.value == State.SHUTDOWN:
534
+ raise ProcessError("Manager has shut down")
535
+ else:
536
+ raise ProcessError(
537
+ "Unknown state {!r}".format(self._state.value))
538
+
539
+ if initializer is not None and not callable(initializer):
540
+ raise TypeError('initializer must be a callable')
541
+
542
+ # pipe over which we will retrieve address of server
543
+ reader, writer = connection.Pipe(duplex=False)
544
+
545
+ # spawn process which runs a server
546
+ self._process = self._ctx.Process(
547
+ target=type(self)._run_server,
548
+ args=(self._registry, self._address, self._authkey,
549
+ self._serializer, writer, initializer, initargs),
550
+ )
551
+ ident = ':'.join(str(i) for i in self._process._identity)
552
+ self._process.name = type(self).__name__ + '-' + ident
553
+ self._process.start()
554
+
555
+ # get address of server
556
+ writer.close()
557
+ self._address = reader.recv()
558
+ reader.close()
559
+
560
+ # register a finalizer
561
+ self._state.value = State.STARTED
562
+ self.shutdown = util.Finalize(
563
+ self, type(self)._finalize_manager,
564
+ args=(self._process, self._address, self._authkey,
565
+ self._state, self._Client),
566
+ exitpriority=0
567
+ )
568
+
569
+ @classmethod
570
+ def _run_server(cls, registry, address, authkey, serializer, writer,
571
+ initializer=None, initargs=()):
572
+ '''
573
+ Create a server, report its address and run it
574
+ '''
575
+ # bpo-36368: protect server process from KeyboardInterrupt signals
576
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
577
+
578
+ if initializer is not None:
579
+ initializer(*initargs)
580
+
581
+ # create server
582
+ server = cls._Server(registry, address, authkey, serializer)
583
+
584
+ # inform parent process of the server's address
585
+ writer.send(server.address)
586
+ writer.close()
587
+
588
+ # run the manager
589
+ util.info('manager serving at %r', server.address)
590
+ server.serve_forever()
591
+
592
+ def _create(self, typeid, /, *args, **kwds):
593
+ '''
594
+ Create a new shared object; return the token and exposed tuple
595
+ '''
596
+ assert self._state.value == State.STARTED, 'server not yet started'
597
+ conn = self._Client(self._address, authkey=self._authkey)
598
+ try:
599
+ id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
600
+ finally:
601
+ conn.close()
602
+ return Token(typeid, self._address, id), exposed
603
+
604
+ def join(self, timeout=None):
605
+ '''
606
+ Join the manager process (if it has been spawned)
607
+ '''
608
+ if self._process is not None:
609
+ self._process.join(timeout)
610
+ if not self._process.is_alive():
611
+ self._process = None
612
+
613
+ def _debug_info(self):
614
+ '''
615
+ Return some info about the server's shared objects and connections
616
+ '''
617
+ conn = self._Client(self._address, authkey=self._authkey)
618
+ try:
619
+ return dispatch(conn, None, 'debug_info')
620
+ finally:
621
+ conn.close()
622
+
623
+ def _number_of_objects(self):
624
+ '''
625
+ Return the number of shared objects
626
+ '''
627
+ conn = self._Client(self._address, authkey=self._authkey)
628
+ try:
629
+ return dispatch(conn, None, 'number_of_objects')
630
+ finally:
631
+ conn.close()
632
+
633
+ def __enter__(self):
634
+ if self._state.value == State.INITIAL:
635
+ self.start()
636
+ if self._state.value != State.STARTED:
637
+ if self._state.value == State.INITIAL:
638
+ raise ProcessError("Unable to start server")
639
+ elif self._state.value == State.SHUTDOWN:
640
+ raise ProcessError("Manager has shut down")
641
+ else:
642
+ raise ProcessError(
643
+ "Unknown state {!r}".format(self._state.value))
644
+ return self
645
+
646
+ def __exit__(self, exc_type, exc_val, exc_tb):
647
+ self.shutdown()
648
+
649
+ @staticmethod
650
+ def _finalize_manager(process, address, authkey, state, _Client):
651
+ '''
652
+ Shutdown the manager process; will be registered as a finalizer
653
+ '''
654
+ if process.is_alive():
655
+ util.info('sending shutdown message to manager')
656
+ try:
657
+ conn = _Client(address, authkey=authkey)
658
+ try:
659
+ dispatch(conn, None, 'shutdown')
660
+ finally:
661
+ conn.close()
662
+ except Exception:
663
+ pass
664
+
665
+ process.join(timeout=1.0)
666
+ if process.is_alive():
667
+ util.info('manager still alive')
668
+ if hasattr(process, 'terminate'):
669
+ util.info('trying to `terminate()` manager process')
670
+ process.terminate()
671
+ process.join(timeout=0.1)
672
+ if process.is_alive():
673
+ util.info('manager still alive after terminate')
674
+
675
+ state.value = State.SHUTDOWN
676
+ try:
677
+ del BaseProxy._address_to_local[address]
678
+ except KeyError:
679
+ pass
680
+
681
+ @property
682
+ def address(self):
683
+ return self._address
684
+
685
+ @classmethod
686
+ def register(cls, typeid, callable=None, proxytype=None, exposed=None,
687
+ method_to_typeid=None, create_method=True):
688
+ '''
689
+ Register a typeid with the manager type
690
+ '''
691
+ if '_registry' not in cls.__dict__:
692
+ cls._registry = cls._registry.copy()
693
+
694
+ if proxytype is None:
695
+ proxytype = AutoProxy
696
+
697
+ exposed = exposed or getattr(proxytype, '_exposed_', None)
698
+
699
+ method_to_typeid = method_to_typeid or \
700
+ getattr(proxytype, '_method_to_typeid_', None)
701
+
702
+ if method_to_typeid:
703
+ for key, value in list(method_to_typeid.items()): # isinstance?
704
+ assert type(key) is str, '%r is not a string' % key
705
+ assert type(value) is str, '%r is not a string' % value
706
+
707
+ cls._registry[typeid] = (
708
+ callable, exposed, method_to_typeid, proxytype
709
+ )
710
+
711
+ if create_method:
712
+ def temp(self, /, *args, **kwds):
713
+ util.debug('requesting creation of a shared %r object', typeid)
714
+ token, exp = self._create(typeid, *args, **kwds)
715
+ proxy = proxytype(
716
+ token, self._serializer, manager=self,
717
+ authkey=self._authkey, exposed=exp
718
+ )
719
+ conn = self._Client(token.address, authkey=self._authkey)
720
+ dispatch(conn, None, 'decref', (token.id,))
721
+ return proxy
722
+ temp.__name__ = typeid
723
+ setattr(cls, typeid, temp)
724
+
725
+ #
726
+ # Subclass of set which gets cleared after a fork
727
+ #
728
+
729
+ class ProcessLocalSet(set):
730
+ def __init__(self):
731
+ util.register_after_fork(self, lambda obj: obj.clear())
732
+ def __reduce__(self):
733
+ return type(self), ()
734
+
735
+ #
736
+ # Definition of BaseProxy
737
+ #
738
+
739
+ class BaseProxy(object):
740
+ '''
741
+ A base for proxies of shared objects
742
+ '''
743
+ _address_to_local = {}
744
+ _mutex = util.ForkAwareThreadLock()
745
+
746
+ def __init__(self, token, serializer, manager=None,
747
+ authkey=None, exposed=None, incref=True, manager_owned=False):
748
+ with BaseProxy._mutex:
749
+ tls_idset = BaseProxy._address_to_local.get(token.address, None)
750
+ if tls_idset is None:
751
+ tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
752
+ BaseProxy._address_to_local[token.address] = tls_idset
753
+
754
+ # self._tls is used to record the connection used by this
755
+ # thread to communicate with the manager at token.address
756
+ self._tls = tls_idset[0]
757
+
758
+ # self._idset is used to record the identities of all shared
759
+ # objects for which the current process owns references and
760
+ # which are in the manager at token.address
761
+ self._idset = tls_idset[1]
762
+
763
+ self._token = token
764
+ self._id = self._token.id
765
+ self._manager = manager
766
+ self._serializer = serializer
767
+ self._Client = listener_client[serializer][1]
768
+
769
+ # Should be set to True only when a proxy object is being created
770
+ # on the manager server; primary use case: nested proxy objects.
771
+ # RebuildProxy detects when a proxy is being created on the manager
772
+ # and sets this value appropriately.
773
+ self._owned_by_manager = manager_owned
774
+
775
+ if authkey is not None:
776
+ self._authkey = process.AuthenticationString(authkey)
777
+ elif self._manager is not None:
778
+ self._authkey = self._manager._authkey
779
+ else:
780
+ self._authkey = process.current_process().authkey
781
+
782
+ if incref:
783
+ self._incref()
784
+
785
+ util.register_after_fork(self, BaseProxy._after_fork)
786
+
787
+ def _connect(self):
788
+ util.debug('making connection to manager')
789
+ name = process.current_process().name
790
+ if threading.current_thread().name != 'MainThread':
791
+ name += '|' + threading.current_thread().name
792
+ conn = self._Client(self._token.address, authkey=self._authkey)
793
+ dispatch(conn, None, 'accept_connection', (name,))
794
+ self._tls.connection = conn
795
+
796
+ def _callmethod(self, methodname, args=(), kwds={}):
797
+ '''
798
+ Try to call a method of the referent and return a copy of the result
799
+ '''
800
+ try:
801
+ conn = self._tls.connection
802
+ except AttributeError:
803
+ util.debug('thread %r does not own a connection',
804
+ threading.current_thread().name)
805
+ self._connect()
806
+ conn = self._tls.connection
807
+
808
+ conn.send((self._id, methodname, args, kwds))
809
+ kind, result = conn.recv()
810
+
811
+ if kind == '#RETURN':
812
+ return result
813
+ elif kind == '#PROXY':
814
+ exposed, token = result
815
+ proxytype = self._manager._registry[token.typeid][-1]
816
+ token.address = self._token.address
817
+ proxy = proxytype(
818
+ token, self._serializer, manager=self._manager,
819
+ authkey=self._authkey, exposed=exposed
820
+ )
821
+ conn = self._Client(token.address, authkey=self._authkey)
822
+ dispatch(conn, None, 'decref', (token.id,))
823
+ return proxy
824
+ raise convert_to_error(kind, result)
825
+
826
+ def _getvalue(self):
827
+ '''
828
+ Get a copy of the value of the referent
829
+ '''
830
+ return self._callmethod('#GETVALUE')
831
+
832
+ def _incref(self):
833
+ if self._owned_by_manager:
834
+ util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
835
+ return
836
+
837
+ conn = self._Client(self._token.address, authkey=self._authkey)
838
+ dispatch(conn, None, 'incref', (self._id,))
839
+ util.debug('INCREF %r', self._token.id)
840
+
841
+ self._idset.add(self._id)
842
+
843
+ state = self._manager and self._manager._state
844
+
845
+ self._close = util.Finalize(
846
+ self, BaseProxy._decref,
847
+ args=(self._token, self._authkey, state,
848
+ self._tls, self._idset, self._Client),
849
+ exitpriority=10
850
+ )
851
+
852
+ @staticmethod
853
+ def _decref(token, authkey, state, tls, idset, _Client):
854
+ idset.discard(token.id)
855
+
856
+ # check whether manager is still alive
857
+ if state is None or state.value == State.STARTED:
858
+ # tell manager this process no longer cares about referent
859
+ try:
860
+ util.debug('DECREF %r', token.id)
861
+ conn = _Client(token.address, authkey=authkey)
862
+ dispatch(conn, None, 'decref', (token.id,))
863
+ except Exception as e:
864
+ util.debug('... decref failed %s', e)
865
+
866
+ else:
867
+ util.debug('DECREF %r -- manager already shutdown', token.id)
868
+
869
+ # check whether we can close this thread's connection because
870
+ # the process owns no more references to objects for this manager
871
+ if not idset and hasattr(tls, 'connection'):
872
+ util.debug('thread %r has no more proxies so closing conn',
873
+ threading.current_thread().name)
874
+ tls.connection.close()
875
+ del tls.connection
876
+
877
+ def _after_fork(self):
878
+ self._manager = None
879
+ try:
880
+ self._incref()
881
+ except Exception as e:
882
+ # the proxy may just be for a manager which has shutdown
883
+ util.info('incref failed: %s' % e)
884
+
885
+ def __reduce__(self):
886
+ kwds = {}
887
+ if get_spawning_popen() is not None:
888
+ kwds['authkey'] = self._authkey
889
+
890
+ if getattr(self, '_isauto', False):
891
+ kwds['exposed'] = self._exposed_
892
+ return (RebuildProxy,
893
+ (AutoProxy, self._token, self._serializer, kwds))
894
+ else:
895
+ return (RebuildProxy,
896
+ (type(self), self._token, self._serializer, kwds))
897
+
898
+ def __deepcopy__(self, memo):
899
+ return self._getvalue()
900
+
901
+ def __repr__(self):
902
+ return '<%s object, typeid %r at %#x>' % \
903
+ (type(self).__name__, self._token.typeid, id(self))
904
+
905
+ def __str__(self):
906
+ '''
907
+ Return representation of the referent (or a fall-back if that fails)
908
+ '''
909
+ try:
910
+ return self._callmethod('__repr__')
911
+ except Exception:
912
+ return repr(self)[:-1] + "; '__str__()' failed>"
913
+
914
+ #
915
+ # Function used for unpickling
916
+ #
917
+
918
+ def RebuildProxy(func, token, serializer, kwds):
919
+ '''
920
+ Function used for unpickling proxy objects.
921
+ '''
922
+ server = getattr(process.current_process(), '_manager_server', None)
923
+ if server and server.address == token.address:
924
+ util.debug('Rebuild a proxy owned by manager, token=%r', token)
925
+ kwds['manager_owned'] = True
926
+ if token.id not in server.id_to_local_proxy_obj:
927
+ server.id_to_local_proxy_obj[token.id] = \
928
+ server.id_to_obj[token.id]
929
+ incref = (
930
+ kwds.pop('incref', True) and
931
+ not getattr(process.current_process(), '_inheriting', False)
932
+ )
933
+ return func(token, serializer, incref=incref, **kwds)
934
+
935
+ #
936
+ # Functions to create proxies and proxy types
937
+ #
938
+
939
+ def MakeProxyType(name, exposed, _cache={}):
940
+ '''
941
+ Return a proxy type whose methods are given by `exposed`
942
+ '''
943
+ exposed = tuple(exposed)
944
+ try:
945
+ return _cache[(name, exposed)]
946
+ except KeyError:
947
+ pass
948
+
949
+ dic = {}
950
+
951
+ for meth in exposed:
952
+ exec('''def %s(self, /, *args, **kwds):
953
+ return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
954
+
955
+ ProxyType = type(name, (BaseProxy,), dic)
956
+ ProxyType._exposed_ = exposed
957
+ _cache[(name, exposed)] = ProxyType
958
+ return ProxyType
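A minimal sketch of how `MakeProxyType` is typically paired with `BaseManager.register` (the `Counter` class and `CounterManager` names below are illustrative, not part of this module):

from multiprocess.managers import BaseManager, MakeProxyType

class Counter:
    def __init__(self):
        self._n = 0
    def increment(self):
        self._n += 1
    def value(self):
        return self._n

# The generated proxy forwards only the listed method names via _callmethod().
CounterProxy = MakeProxyType('CounterProxy', ('increment', 'value'))

class CounterManager(BaseManager):
    pass

CounterManager.register('Counter', Counter, CounterProxy)

if __name__ == '__main__':
    with CounterManager() as manager:
        counter = manager.Counter()
        counter.increment()
        print(counter.value())  # -> 1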
959
+
960
+
961
+ def AutoProxy(token, serializer, manager=None, authkey=None,
962
+ exposed=None, incref=True):
963
+ '''
964
+ Return an auto-proxy for `token`
965
+ '''
966
+ _Client = listener_client[serializer][1]
967
+
968
+ if exposed is None:
969
+ conn = _Client(token.address, authkey=authkey)
970
+ try:
971
+ exposed = dispatch(conn, None, 'get_methods', (token,))
972
+ finally:
973
+ conn.close()
974
+
975
+ if authkey is None and manager is not None:
976
+ authkey = manager._authkey
977
+ if authkey is None:
978
+ authkey = process.current_process().authkey
979
+
980
+ ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
981
+ proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
982
+ incref=incref)
983
+ proxy._isauto = True
984
+ return proxy
985
+
986
+ #
987
+ # Types/callables which we will register with SyncManager
988
+ #
989
+
990
+ class Namespace(object):
991
+ def __init__(self, /, **kwds):
992
+ self.__dict__.update(kwds)
993
+ def __repr__(self):
994
+ items = list(self.__dict__.items())
995
+ temp = []
996
+ for name, value in items:
997
+ if not name.startswith('_'):
998
+ temp.append('%s=%r' % (name, value))
999
+ temp.sort()
1000
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
1001
+
1002
+ class Value(object):
1003
+ def __init__(self, typecode, value, lock=True):
1004
+ self._typecode = typecode
1005
+ self._value = value
1006
+ def get(self):
1007
+ return self._value
1008
+ def set(self, value):
1009
+ self._value = value
1010
+ def __repr__(self):
1011
+ return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
1012
+ value = property(get, set)
1013
+
1014
+ def Array(typecode, sequence, lock=True):
1015
+ return array.array(typecode, sequence)
1016
+
1017
+ #
1018
+ # Proxy types used by SyncManager
1019
+ #
1020
+
1021
+ class IteratorProxy(BaseProxy):
1022
+ _exposed_ = ('__next__', 'send', 'throw', 'close')
1023
+ def __iter__(self):
1024
+ return self
1025
+ def __next__(self, *args):
1026
+ return self._callmethod('__next__', args)
1027
+ def send(self, *args):
1028
+ return self._callmethod('send', args)
1029
+ def throw(self, *args):
1030
+ return self._callmethod('throw', args)
1031
+ def close(self, *args):
1032
+ return self._callmethod('close', args)
1033
+
1034
+
1035
+ class AcquirerProxy(BaseProxy):
1036
+ _exposed_ = ('acquire', 'release')
1037
+ def acquire(self, blocking=True, timeout=None):
1038
+ args = (blocking,) if timeout is None else (blocking, timeout)
1039
+ return self._callmethod('acquire', args)
1040
+ def release(self):
1041
+ return self._callmethod('release')
1042
+ def __enter__(self):
1043
+ return self._callmethod('acquire')
1044
+ def __exit__(self, exc_type, exc_val, exc_tb):
1045
+ return self._callmethod('release')
1046
+
1047
+
1048
+ class ConditionProxy(AcquirerProxy):
1049
+ _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
1050
+ def wait(self, timeout=None):
1051
+ return self._callmethod('wait', (timeout,))
1052
+ def notify(self, n=1):
1053
+ return self._callmethod('notify', (n,))
1054
+ def notify_all(self):
1055
+ return self._callmethod('notify_all')
1056
+ def wait_for(self, predicate, timeout=None):
1057
+ result = predicate()
1058
+ if result:
1059
+ return result
1060
+ if timeout is not None:
1061
+ endtime = getattr(time, 'monotonic', time.time)() + timeout
1062
+ else:
1063
+ endtime = None
1064
+ waittime = None
1065
+ while not result:
1066
+ if endtime is not None:
1067
+ waittime = endtime - getattr(time, 'monotonic', time.time)()
1068
+ if waittime <= 0:
1069
+ break
1070
+ self.wait(waittime)
1071
+ result = predicate()
1072
+ return result
1073
+
1074
+
1075
+ class EventProxy(BaseProxy):
1076
+ _exposed_ = ('is_set', 'set', 'clear', 'wait')
1077
+ def is_set(self):
1078
+ return self._callmethod('is_set')
1079
+ def set(self):
1080
+ return self._callmethod('set')
1081
+ def clear(self):
1082
+ return self._callmethod('clear')
1083
+ def wait(self, timeout=None):
1084
+ return self._callmethod('wait', (timeout,))
1085
+
1086
+
1087
+ class BarrierProxy(BaseProxy):
1088
+ _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
1089
+ def wait(self, timeout=None):
1090
+ return self._callmethod('wait', (timeout,))
1091
+ def abort(self):
1092
+ return self._callmethod('abort')
1093
+ def reset(self):
1094
+ return self._callmethod('reset')
1095
+ @property
1096
+ def parties(self):
1097
+ return self._callmethod('__getattribute__', ('parties',))
1098
+ @property
1099
+ def n_waiting(self):
1100
+ return self._callmethod('__getattribute__', ('n_waiting',))
1101
+ @property
1102
+ def broken(self):
1103
+ return self._callmethod('__getattribute__', ('broken',))
1104
+
1105
+
1106
+ class NamespaceProxy(BaseProxy):
1107
+ _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
1108
+ def __getattr__(self, key):
1109
+ if key[0] == '_':
1110
+ return object.__getattribute__(self, key)
1111
+ callmethod = object.__getattribute__(self, '_callmethod')
1112
+ return callmethod('__getattribute__', (key,))
1113
+ def __setattr__(self, key, value):
1114
+ if key[0] == '_':
1115
+ return object.__setattr__(self, key, value)
1116
+ callmethod = object.__getattribute__(self, '_callmethod')
1117
+ return callmethod('__setattr__', (key, value))
1118
+ def __delattr__(self, key):
1119
+ if key[0] == '_':
1120
+ return object.__delattr__(self, key)
1121
+ callmethod = object.__getattribute__(self, '_callmethod')
1122
+ return callmethod('__delattr__', (key,))
1123
+
1124
+
1125
+ class ValueProxy(BaseProxy):
1126
+ _exposed_ = ('get', 'set')
1127
+ def get(self):
1128
+ return self._callmethod('get')
1129
+ def set(self, value):
1130
+ return self._callmethod('set', (value,))
1131
+ value = property(get, set)
1132
+
1133
+ __class_getitem__ = classmethod(types.GenericAlias)
1134
+
1135
+
1136
+ BaseListProxy = MakeProxyType('BaseListProxy', (
1137
+ '__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
1138
+ '__mul__', '__reversed__', '__rmul__', '__setitem__',
1139
+ 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
1140
+ 'reverse', 'sort', '__imul__'
1141
+ ))
1142
+ class ListProxy(BaseListProxy):
1143
+ def __iadd__(self, value):
1144
+ self._callmethod('extend', (value,))
1145
+ return self
1146
+ def __imul__(self, value):
1147
+ self._callmethod('__imul__', (value,))
1148
+ return self
1149
+
1150
+
1151
+ DictProxy = MakeProxyType('DictProxy', (
1152
+ '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
1153
+ '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
1154
+ 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
1155
+ ))
1156
+ DictProxy._method_to_typeid_ = {
1157
+ '__iter__': 'Iterator',
1158
+ }
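One well-documented subtlety of these container proxies: since `_callmethod` returns a copy of the result, in-place mutation of a plain mutable value stored inside a proxy is lost unless the value is written back. A short sketch:

from multiprocess import Manager

if __name__ == '__main__':
    with Manager() as m:
        d = m.dict()
        d['items'] = []
        d['items'].append(1)  # mutates a local copy returned by __getitem__
        print(d['items'])     # -> []
        lst = d['items']
        lst.append(1)
        d['items'] = lst      # writing back through __setitem__ propagates it
        print(d['items'])     # -> [1]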
1159
+
1160
+
1161
+ ArrayProxy = MakeProxyType('ArrayProxy', (
1162
+ '__len__', '__getitem__', '__setitem__'
1163
+ ))
1164
+
1165
+
1166
+ BasePoolProxy = MakeProxyType('PoolProxy', (
1167
+ 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
1168
+ 'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
1169
+ ))
1170
+ BasePoolProxy._method_to_typeid_ = {
1171
+ 'apply_async': 'AsyncResult',
1172
+ 'map_async': 'AsyncResult',
1173
+ 'starmap_async': 'AsyncResult',
1174
+ 'imap': 'Iterator',
1175
+ 'imap_unordered': 'Iterator'
1176
+ }
1177
+ class PoolProxy(BasePoolProxy):
1178
+ def __enter__(self):
1179
+ return self
1180
+ def __exit__(self, exc_type, exc_val, exc_tb):
1181
+ self.terminate()
1182
+
1183
+ #
1184
+ # Definition of SyncManager
1185
+ #
1186
+
1187
+ class SyncManager(BaseManager):
1188
+ '''
1189
+ Subclass of `BaseManager` which supports a number of shared object types.
1190
+
1191
+ The types registered are those intended for the synchronization
1192
+ of threads, plus `dict`, `list` and `Namespace`.
1193
+
1194
+ The `multiprocess.Manager()` function creates started instances of
1195
+ this class.
1196
+ '''
1197
+
1198
+ SyncManager.register('Queue', queue.Queue)
1199
+ SyncManager.register('JoinableQueue', queue.Queue)
1200
+ SyncManager.register('Event', threading.Event, EventProxy)
1201
+ SyncManager.register('Lock', threading.Lock, AcquirerProxy)
1202
+ SyncManager.register('RLock', threading.RLock, AcquirerProxy)
1203
+ SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
1204
+ SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
1205
+ AcquirerProxy)
1206
+ SyncManager.register('Condition', threading.Condition, ConditionProxy)
1207
+ SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
1208
+ SyncManager.register('Pool', pool.Pool, PoolProxy)
1209
+ SyncManager.register('list', list, ListProxy)
1210
+ SyncManager.register('dict', dict, DictProxy)
1211
+ SyncManager.register('Value', Value, ValueProxy)
1212
+ SyncManager.register('Array', Array, ArrayProxy)
1213
+ SyncManager.register('Namespace', Namespace, NamespaceProxy)
1214
+
1215
+ # types returned by methods of PoolProxy
1216
+ SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
1217
+ SyncManager.register('AsyncResult', create_method=False)
1218
+
1219
+ #
1220
+ # Definition of SharedMemoryManager and SharedMemoryServer
1221
+ #
1222
+
1223
+ if HAS_SHMEM:
1224
+ class _SharedMemoryTracker:
1225
+ "Manages one or more shared memory segments."
1226
+
1227
+ def __init__(self, name, segment_names=[]):
1228
+ self.shared_memory_context_name = name
1229
+ self.segment_names = segment_names
1230
+
1231
+ def register_segment(self, segment_name):
1232
+ "Adds the supplied shared memory block name to tracker."
1233
+ util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
1234
+ self.segment_names.append(segment_name)
1235
+
1236
+ def destroy_segment(self, segment_name):
1237
+ """Calls unlink() on the shared memory block with the supplied name
1238
+ and removes it from the list of blocks being tracked."""
1239
+ util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
1240
+ self.segment_names.remove(segment_name)
1241
+ segment = shared_memory.SharedMemory(segment_name)
1242
+ segment.close()
1243
+ segment.unlink()
1244
+
1245
+ def unlink(self):
1246
+ "Calls destroy_segment() on all tracked shared memory blocks."
1247
+ for segment_name in self.segment_names[:]:
1248
+ self.destroy_segment(segment_name)
1249
+
1250
+ def __del__(self):
1251
+ util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
1252
+ self.unlink()
1253
+
1254
+ def __getstate__(self):
1255
+ return (self.shared_memory_context_name, self.segment_names)
1256
+
1257
+ def __setstate__(self, state):
1258
+ self.__init__(*state)
1259
+
1260
+
1261
+ class SharedMemoryServer(Server):
1262
+
1263
+ public = Server.public + \
1264
+ ['track_segment', 'release_segment', 'list_segments']
1265
+
1266
+ def __init__(self, *args, **kwargs):
1267
+ Server.__init__(self, *args, **kwargs)
1268
+ address = self.address
1269
+ # The address of Linux abstract namespaces can be bytes
1270
+ if isinstance(address, bytes):
1271
+ address = os.fsdecode(address)
1272
+ self.shared_memory_context = \
1273
+ _SharedMemoryTracker(f"shm_{address}_{getpid()}")
1274
+ util.debug(f"SharedMemoryServer started by pid {getpid()}")
1275
+
1276
+ def create(self, c, typeid, /, *args, **kwargs):
1277
+ """Create a new distributed-shared object (not backed by a shared
1278
+ memory block) and return its id to be used in a Proxy Object."""
1279
+ # Unless set up as a shared proxy, don't make shared_memory_context
1280
+ # a standard part of kwargs. This makes things easier for supplying
1281
+ # simple functions.
1282
+ if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
1283
+ kwargs['shared_memory_context'] = self.shared_memory_context
1284
+ return Server.create(self, c, typeid, *args, **kwargs)
1285
+
1286
+ def shutdown(self, c):
1287
+ "Call unlink() on all tracked shared memory, terminate the Server."
1288
+ self.shared_memory_context.unlink()
1289
+ return Server.shutdown(self, c)
1290
+
1291
+ def track_segment(self, c, segment_name):
1292
+ "Adds the supplied shared memory block name to Server's tracker."
1293
+ self.shared_memory_context.register_segment(segment_name)
1294
+
1295
+ def release_segment(self, c, segment_name):
1296
+ """Calls unlink() on the shared memory block with the supplied name
1297
+ and removes it from the tracker instance inside the Server."""
1298
+ self.shared_memory_context.destroy_segment(segment_name)
1299
+
1300
+ def list_segments(self, c):
1301
+ """Returns a list of names of shared memory blocks that the Server
1302
+ is currently tracking."""
1303
+ return self.shared_memory_context.segment_names
1304
+
1305
+
1306
+ class SharedMemoryManager(BaseManager):
1307
+ """Like SyncManager but uses SharedMemoryServer instead of Server.
1308
+
1309
+ It provides methods for creating and returning SharedMemory instances
1310
+ and for creating a list-like object (ShareableList) backed by shared
1311
+ memory. It also provides methods that create and return Proxy Objects
1312
+ that support synchronization across processes (i.e. multi-process-safe
1313
+ locks and semaphores).
1314
+ """
1315
+
1316
+ _Server = SharedMemoryServer
1317
+
1318
+ def __init__(self, *args, **kwargs):
1319
+ if os.name == "posix":
1320
+ # bpo-36867: Ensure the resource_tracker is running before
1321
+ # launching the manager process, so that concurrent
1322
+ # shared_memory manipulation both in the manager and in the
1323
+ # current process does not create two resource_tracker
1324
+ # processes.
1325
+ from . import resource_tracker
1326
+ resource_tracker.ensure_running()
1327
+ BaseManager.__init__(self, *args, **kwargs)
1328
+ util.debug(f"{self.__class__.__name__} created by pid {getpid()}")
1329
+
1330
+ def __del__(self):
1331
+ util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
1332
+ pass
1333
+
1334
+ def get_server(self):
1335
+ 'Better than monkeypatching for now; merge into Server ultimately'
1336
+ if self._state.value != State.INITIAL:
1337
+ if self._state.value == State.STARTED:
1338
+ raise ProcessError("Already started SharedMemoryServer")
1339
+ elif self._state.value == State.SHUTDOWN:
1340
+ raise ProcessError("SharedMemoryManager has shut down")
1341
+ else:
1342
+ raise ProcessError(
1343
+ "Unknown state {!r}".format(self._state.value))
1344
+ return self._Server(self._registry, self._address,
1345
+ self._authkey, self._serializer)
1346
+
1347
+ def SharedMemory(self, size):
1348
+ """Returns a new SharedMemory instance with the specified size in
1349
+ bytes, to be tracked by the manager."""
1350
+ with self._Client(self._address, authkey=self._authkey) as conn:
1351
+ sms = shared_memory.SharedMemory(None, create=True, size=size)
1352
+ try:
1353
+ dispatch(conn, None, 'track_segment', (sms.name,))
1354
+ except BaseException as e:
1355
+ sms.unlink()
1356
+ raise e
1357
+ return sms
1358
+
1359
+ def ShareableList(self, sequence):
1360
+ """Returns a new ShareableList instance populated with the values
1361
+ from the input sequence, to be tracked by the manager."""
1362
+ with self._Client(self._address, authkey=self._authkey) as conn:
1363
+ sl = shared_memory.ShareableList(sequence)
1364
+ try:
1365
+ dispatch(conn, None, 'track_segment', (sl.shm.name,))
1366
+ except BaseException as e:
1367
+ sl.shm.unlink()
1368
+ raise e
1369
+ return sl
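A short sketch of the shared-memory manager in use (requires a build where HAS_SHMEM is true); every segment created through the manager is tracked by the server and destroyed at shutdown:

from multiprocess.managers import SharedMemoryManager

if __name__ == '__main__':
    with SharedMemoryManager() as smm:
        sl = smm.ShareableList(range(5))  # registered via 'track_segment'
        sl[0] = 99
        shm = smm.SharedMemory(size=64)   # likewise tracked by the server
        shm.buf[:3] = b'abc'
    # Leaving the block shuts the server down, and
    # _SharedMemoryTracker.unlink() destroys all tracked segments.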
lib/python3.10/site-packages/multiprocess/pool.py ADDED
@@ -0,0 +1,954 @@
1
+ #
2
+ # Module providing the `Pool` class for managing a process pool
3
+ #
4
+ # multiprocessing/pool.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = ['Pool', 'ThreadPool']
11
+
12
+ #
13
+ # Imports
14
+ #
15
+
16
+ import collections
17
+ import itertools
18
+ import os
19
+ import queue
20
+ import threading
21
+ import time
22
+ import traceback
23
+ import types
24
+ import warnings
25
+
26
+ # If threading is available then ThreadPool should be provided. Therefore
27
+ # we avoid top-level imports which are liable to fail on some systems.
28
+ from . import util
29
+ from . import get_context, TimeoutError
30
+ from .connection import wait
31
+
32
+ #
33
+ # Constants representing the state of a pool
34
+ #
35
+
36
+ INIT = "INIT"
37
+ RUN = "RUN"
38
+ CLOSE = "CLOSE"
39
+ TERMINATE = "TERMINATE"
40
+
41
+ #
42
+ # Miscellaneous
43
+ #
44
+
45
+ job_counter = itertools.count()
46
+
47
+ def mapstar(args):
48
+ return list(map(*args))
49
+
50
+ def starmapstar(args):
51
+ return list(itertools.starmap(args[0], args[1]))
52
+
53
+ #
54
+ # Hack to embed stringification of remote traceback in local traceback
55
+ #
56
+
57
+ class RemoteTraceback(Exception):
58
+ def __init__(self, tb):
59
+ self.tb = tb
60
+ def __str__(self):
61
+ return self.tb
62
+
63
+ class ExceptionWithTraceback:
64
+ def __init__(self, exc, tb):
65
+ tb = traceback.format_exception(type(exc), exc, tb)
66
+ tb = ''.join(tb)
67
+ self.exc = exc
68
+ self.tb = '\n"""\n%s"""' % tb
69
+ def __reduce__(self):
70
+ return rebuild_exc, (self.exc, self.tb)
71
+
72
+ def rebuild_exc(exc, tb):
73
+ exc.__cause__ = RemoteTraceback(tb)
74
+ return exc
75
+
76
+ #
77
+ # Code run by worker processes
78
+ #
79
+
80
+ class MaybeEncodingError(Exception):
81
+ """Wraps possible unpickleable errors, so they can be
82
+ safely sent through the socket."""
83
+
84
+ def __init__(self, exc, value):
85
+ self.exc = repr(exc)
86
+ self.value = repr(value)
87
+ super(MaybeEncodingError, self).__init__(self.exc, self.value)
88
+
89
+ def __str__(self):
90
+ return "Error sending result: '%s'. Reason: '%s'" % (self.value,
91
+ self.exc)
92
+
93
+ def __repr__(self):
94
+ return "<%s: %s>" % (self.__class__.__name__, self)
95
+
96
+
97
+ def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
98
+ wrap_exception=False):
99
+ if (maxtasks is not None) and not (isinstance(maxtasks, int)
100
+ and maxtasks >= 1):
101
+ raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks))
102
+ put = outqueue.put
103
+ get = inqueue.get
104
+ if hasattr(inqueue, '_writer'):
105
+ inqueue._writer.close()
106
+ outqueue._reader.close()
107
+
108
+ if initializer is not None:
109
+ initializer(*initargs)
110
+
111
+ completed = 0
112
+ while maxtasks is None or (maxtasks and completed < maxtasks):
113
+ try:
114
+ task = get()
115
+ except (EOFError, OSError):
116
+ util.debug('worker got EOFError or OSError -- exiting')
117
+ break
118
+
119
+ if task is None:
120
+ util.debug('worker got sentinel -- exiting')
121
+ break
122
+
123
+ job, i, func, args, kwds = task
124
+ try:
125
+ result = (True, func(*args, **kwds))
126
+ except Exception as e:
127
+ if wrap_exception and func is not _helper_reraises_exception:
128
+ e = ExceptionWithTraceback(e, e.__traceback__)
129
+ result = (False, e)
130
+ try:
131
+ put((job, i, result))
132
+ except Exception as e:
133
+ wrapped = MaybeEncodingError(e, result[1])
134
+ util.debug("Possible encoding error while sending result: %s" % (
135
+ wrapped))
136
+ put((job, i, (False, wrapped)))
137
+
138
+ task = job = result = func = args = kwds = None
139
+ completed += 1
140
+ util.debug('worker exiting after %d tasks' % completed)
141
+
142
+ def _helper_reraises_exception(ex):
143
+ 'Pickle-able helper function for use by _guarded_task_generation.'
144
+ raise ex
145
+
146
+ #
147
+ # Class representing a process pool
148
+ #
149
+
150
+ class _PoolCache(dict):
151
+ """
152
+ Class that implements a cache for the Pool class that will notify
153
+ the pool management threads every time the cache is emptied. The
154
+ notification is done by the use of a queue that is provided when
155
+ instantiating the cache.
156
+ """
157
+ def __init__(self, /, *args, notifier=None, **kwds):
158
+ self.notifier = notifier
159
+ super().__init__(*args, **kwds)
160
+
161
+ def __delitem__(self, item):
162
+ super().__delitem__(item)
163
+
164
+ # Notify that the cache is empty. This is important because the
165
+ # pool keeps maintaining workers until the cache gets drained. This
166
+ # eliminates a race condition in which a task is finished after the
167
+ # pool's _handle_workers method has entered another iteration of the
168
+ # loop. In this situation, the only event that can wake up the pool
169
+ # is the cache being emptied (no more tasks available).
170
+ if not self:
171
+ self.notifier.put(None)
172
+
173
+ class Pool(object):
174
+ '''
175
+ Class which supports an async version of applying functions to arguments.
176
+ '''
177
+ _wrap_exception = True
178
+
179
+ @staticmethod
180
+ def Process(ctx, *args, **kwds):
181
+ return ctx.Process(*args, **kwds)
182
+
183
+ def __init__(self, processes=None, initializer=None, initargs=(),
184
+ maxtasksperchild=None, context=None):
185
+ # Attributes initialized early to make sure that they exist in
186
+ # __del__() if __init__() raises an exception
187
+ self._pool = []
188
+ self._state = INIT
189
+
190
+ self._ctx = context or get_context()
191
+ self._setup_queues()
192
+ self._taskqueue = queue.SimpleQueue()
193
+ # The _change_notifier queue exists to wake up self._handle_workers()
194
+ # when the cache (self._cache) is empty or when there is a change in
195
+ # the _state variable of the thread that runs _handle_workers.
196
+ self._change_notifier = self._ctx.SimpleQueue()
197
+ self._cache = _PoolCache(notifier=self._change_notifier)
198
+ self._maxtasksperchild = maxtasksperchild
199
+ self._initializer = initializer
200
+ self._initargs = initargs
201
+
202
+ if processes is None:
203
+ processes = os.cpu_count() or 1
204
+ if processes < 1:
205
+ raise ValueError("Number of processes must be at least 1")
206
+
207
+ if initializer is not None and not callable(initializer):
208
+ raise TypeError('initializer must be a callable')
209
+
210
+ self._processes = processes
211
+ try:
212
+ self._repopulate_pool()
213
+ except Exception:
214
+ for p in self._pool:
215
+ if p.exitcode is None:
216
+ p.terminate()
217
+ for p in self._pool:
218
+ p.join()
219
+ raise
220
+
221
+ sentinels = self._get_sentinels()
222
+
223
+ self._worker_handler = threading.Thread(
224
+ target=Pool._handle_workers,
225
+ args=(self._cache, self._taskqueue, self._ctx, self.Process,
226
+ self._processes, self._pool, self._inqueue, self._outqueue,
227
+ self._initializer, self._initargs, self._maxtasksperchild,
228
+ self._wrap_exception, sentinels, self._change_notifier)
229
+ )
230
+ self._worker_handler.daemon = True
231
+ self._worker_handler._state = RUN
232
+ self._worker_handler.start()
233
+
234
+
235
+ self._task_handler = threading.Thread(
236
+ target=Pool._handle_tasks,
237
+ args=(self._taskqueue, self._quick_put, self._outqueue,
238
+ self._pool, self._cache)
239
+ )
240
+ self._task_handler.daemon = True
241
+ self._task_handler._state = RUN
242
+ self._task_handler.start()
243
+
244
+ self._result_handler = threading.Thread(
245
+ target=Pool._handle_results,
246
+ args=(self._outqueue, self._quick_get, self._cache)
247
+ )
248
+ self._result_handler.daemon = True
249
+ self._result_handler._state = RUN
250
+ self._result_handler.start()
251
+
252
+ self._terminate = util.Finalize(
253
+ self, self._terminate_pool,
254
+ args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
255
+ self._change_notifier, self._worker_handler, self._task_handler,
256
+ self._result_handler, self._cache),
257
+ exitpriority=15
258
+ )
259
+ self._state = RUN
260
+
261
+ # Copy globals as function locals to make sure that they are available
262
+ # during Python shutdown when the Pool is destroyed.
263
+ def __del__(self, _warn=warnings.warn, RUN=RUN):
264
+ if self._state == RUN:
265
+ _warn(f"unclosed running multiprocessing pool {self!r}",
266
+ ResourceWarning, source=self)
267
+ if getattr(self, '_change_notifier', None) is not None:
268
+ self._change_notifier.put(None)
269
+
270
+ def __repr__(self):
271
+ cls = self.__class__
272
+ return (f'<{cls.__module__}.{cls.__qualname__} '
273
+ f'state={self._state} '
274
+ f'pool_size={len(self._pool)}>')
275
+
276
+ def _get_sentinels(self):
277
+ task_queue_sentinels = [self._outqueue._reader]
278
+ self_notifier_sentinels = [self._change_notifier._reader]
279
+ return [*task_queue_sentinels, *self_notifier_sentinels]
280
+
281
+ @staticmethod
282
+ def _get_worker_sentinels(workers):
283
+ return [worker.sentinel for worker in
284
+ workers if hasattr(worker, "sentinel")]
285
+
286
+ @staticmethod
287
+ def _join_exited_workers(pool):
288
+ """Cleanup after any worker processes which have exited due to reaching
289
+ their specified lifetime. Returns True if any workers were cleaned up.
290
+ """
291
+ cleaned = False
292
+ for i in reversed(range(len(pool))):
293
+ worker = pool[i]
294
+ if worker.exitcode is not None:
295
+ # worker exited
296
+ util.debug('cleaning up worker %d' % i)
297
+ worker.join()
298
+ cleaned = True
299
+ del pool[i]
300
+ return cleaned
301
+
302
+ def _repopulate_pool(self):
303
+ return self._repopulate_pool_static(self._ctx, self.Process,
304
+ self._processes,
305
+ self._pool, self._inqueue,
306
+ self._outqueue, self._initializer,
307
+ self._initargs,
308
+ self._maxtasksperchild,
309
+ self._wrap_exception)
310
+
311
+ @staticmethod
312
+ def _repopulate_pool_static(ctx, Process, processes, pool, inqueue,
313
+ outqueue, initializer, initargs,
314
+ maxtasksperchild, wrap_exception):
315
+ """Bring the number of pool processes up to the specified number,
316
+ for use after reaping workers which have exited.
317
+ """
318
+ for i in range(processes - len(pool)):
319
+ w = Process(ctx, target=worker,
320
+ args=(inqueue, outqueue,
321
+ initializer,
322
+ initargs, maxtasksperchild,
323
+ wrap_exception))
324
+ w.name = w.name.replace('Process', 'PoolWorker')
325
+ w.daemon = True
326
+ w.start()
327
+ pool.append(w)
328
+ util.debug('added worker')
329
+
330
+ @staticmethod
331
+ def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue,
332
+ initializer, initargs, maxtasksperchild,
333
+ wrap_exception):
334
+ """Clean up any exited workers and start replacements for them.
335
+ """
336
+ if Pool._join_exited_workers(pool):
337
+ Pool._repopulate_pool_static(ctx, Process, processes, pool,
338
+ inqueue, outqueue, initializer,
339
+ initargs, maxtasksperchild,
340
+ wrap_exception)
341
+
342
+ def _setup_queues(self):
343
+ self._inqueue = self._ctx.SimpleQueue()
344
+ self._outqueue = self._ctx.SimpleQueue()
345
+ self._quick_put = self._inqueue._writer.send
346
+ self._quick_get = self._outqueue._reader.recv
347
+
348
+ def _check_running(self):
349
+ if self._state != RUN:
350
+ raise ValueError("Pool not running")
351
+
352
+ def apply(self, func, args=(), kwds={}):
353
+ '''
354
+ Equivalent of `func(*args, **kwds)`.
355
+ Pool must be running.
356
+ '''
357
+ return self.apply_async(func, args, kwds).get()
358
+
359
+ def map(self, func, iterable, chunksize=None):
360
+ '''
361
+ Apply `func` to each element in `iterable`, collecting the results
362
+ in a list that is returned.
363
+ '''
364
+ return self._map_async(func, iterable, mapstar, chunksize).get()
365
+
366
+ def starmap(self, func, iterable, chunksize=None):
367
+ '''
368
+ Like `map()` method but the elements of the `iterable` are expected to
369
+ be iterables as well and will be unpacked as arguments. Hence
370
+ `func` and (a, b) becomes func(a, b).
371
+ '''
372
+ return self._map_async(func, iterable, starmapstar, chunksize).get()
373
+
374
+ def starmap_async(self, func, iterable, chunksize=None, callback=None,
375
+ error_callback=None):
376
+ '''
377
+ Asynchronous version of `starmap()` method.
378
+ '''
379
+ return self._map_async(func, iterable, starmapstar, chunksize,
380
+ callback, error_callback)
381
+
382
+ def _guarded_task_generation(self, result_job, func, iterable):
383
+ '''Provides a generator of tasks for imap and imap_unordered with
384
+ appropriate handling for iterables which throw exceptions during
385
+ iteration.'''
386
+ try:
387
+ i = -1
388
+ for i, x in enumerate(iterable):
389
+ yield (result_job, i, func, (x,), {})
390
+ except Exception as e:
391
+ yield (result_job, i+1, _helper_reraises_exception, (e,), {})
392
+
393
+ def imap(self, func, iterable, chunksize=1):
394
+ '''
395
+ Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
396
+ '''
397
+ self._check_running()
398
+ if chunksize == 1:
399
+ result = IMapIterator(self)
400
+ self._taskqueue.put(
401
+ (
402
+ self._guarded_task_generation(result._job, func, iterable),
403
+ result._set_length
404
+ ))
405
+ return result
406
+ else:
407
+ if chunksize < 1:
408
+ raise ValueError(
409
+ "Chunksize must be 1+, not {0:n}".format(
410
+ chunksize))
411
+ task_batches = Pool._get_tasks(func, iterable, chunksize)
412
+ result = IMapIterator(self)
413
+ self._taskqueue.put(
414
+ (
415
+ self._guarded_task_generation(result._job,
416
+ mapstar,
417
+ task_batches),
418
+ result._set_length
419
+ ))
420
+ return (item for chunk in result for item in chunk)
421
+
422
+ def imap_unordered(self, func, iterable, chunksize=1):
423
+ '''
424
+ Like `imap()` method but ordering of results is arbitrary.
425
+ '''
426
+ self._check_running()
427
+ if chunksize == 1:
428
+ result = IMapUnorderedIterator(self)
429
+ self._taskqueue.put(
430
+ (
431
+ self._guarded_task_generation(result._job, func, iterable),
432
+ result._set_length
433
+ ))
434
+ return result
435
+ else:
436
+ if chunksize < 1:
437
+ raise ValueError(
438
+ "Chunksize must be 1+, not {0!r}".format(chunksize))
439
+ task_batches = Pool._get_tasks(func, iterable, chunksize)
440
+ result = IMapUnorderedIterator(self)
441
+ self._taskqueue.put(
442
+ (
443
+ self._guarded_task_generation(result._job,
444
+ mapstar,
445
+ task_batches),
446
+ result._set_length
447
+ ))
448
+ return (item for chunk in result for item in chunk)
449
+
450
+ def apply_async(self, func, args=(), kwds={}, callback=None,
451
+ error_callback=None):
452
+ '''
453
+ Asynchronous version of `apply()` method.
454
+ '''
455
+ self._check_running()
456
+ result = ApplyResult(self, callback, error_callback)
457
+ self._taskqueue.put(([(result._job, 0, func, args, kwds)], None))
458
+ return result
459
+
460
+ def map_async(self, func, iterable, chunksize=None, callback=None,
461
+ error_callback=None):
462
+ '''
463
+ Asynchronous version of `map()` method.
464
+ '''
465
+ return self._map_async(func, iterable, mapstar, chunksize, callback,
466
+ error_callback)
467
+
468
+ def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
469
+ error_callback=None):
470
+ '''
471
+ Helper function to implement map, starmap and their async counterparts.
472
+ '''
473
+ self._check_running()
474
+ if not hasattr(iterable, '__len__'):
475
+ iterable = list(iterable)
476
+
477
+ if chunksize is None:
478
+ chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
479
+ if extra:
480
+ chunksize += 1
481
+ if len(iterable) == 0:
482
+ chunksize = 0
483
+
484
+ task_batches = Pool._get_tasks(func, iterable, chunksize)
485
+ result = MapResult(self, chunksize, len(iterable), callback,
486
+ error_callback=error_callback)
487
+ self._taskqueue.put(
488
+ (
489
+ self._guarded_task_generation(result._job,
490
+ mapper,
491
+ task_batches),
492
+ None
493
+ )
494
+ )
495
+ return result
496
+
497
+ @staticmethod
498
+ def _wait_for_updates(sentinels, change_notifier, timeout=None):
499
+ wait(sentinels, timeout=timeout)
500
+ while not change_notifier.empty():
501
+ change_notifier.get()
502
+
503
+ @classmethod
504
+ def _handle_workers(cls, cache, taskqueue, ctx, Process, processes,
505
+ pool, inqueue, outqueue, initializer, initargs,
506
+ maxtasksperchild, wrap_exception, sentinels,
507
+ change_notifier):
508
+ thread = threading.current_thread()
509
+
510
+ # Keep maintaining workers until the cache gets drained, unless the pool
511
+ # is terminated.
512
+ while thread._state == RUN or (cache and thread._state != TERMINATE):
513
+ cls._maintain_pool(ctx, Process, processes, pool, inqueue,
514
+ outqueue, initializer, initargs,
515
+ maxtasksperchild, wrap_exception)
516
+
517
+ current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels]
518
+
519
+ cls._wait_for_updates(current_sentinels, change_notifier)
520
+ # send sentinel to stop workers
521
+ taskqueue.put(None)
522
+ util.debug('worker handler exiting')
523
+
524
+ @staticmethod
525
+ def _handle_tasks(taskqueue, put, outqueue, pool, cache):
526
+ thread = threading.current_thread()
527
+
528
+ for taskseq, set_length in iter(taskqueue.get, None):
529
+ task = None
530
+ try:
531
+ # iterating taskseq cannot fail
532
+ for task in taskseq:
533
+ if thread._state != RUN:
534
+ util.debug('task handler found thread._state != RUN')
535
+ break
536
+ try:
537
+ put(task)
538
+ except Exception as e:
539
+ job, idx = task[:2]
540
+ try:
541
+ cache[job]._set(idx, (False, e))
542
+ except KeyError:
543
+ pass
544
+ else:
545
+ if set_length:
546
+ util.debug('doing set_length()')
547
+ idx = task[1] if task else -1
548
+ set_length(idx + 1)
549
+ continue
550
+ break
551
+ finally:
552
+ task = taskseq = job = None
553
+ else:
554
+ util.debug('task handler got sentinel')
555
+
556
+ try:
557
+ # tell result handler to finish when cache is empty
558
+ util.debug('task handler sending sentinel to result handler')
559
+ outqueue.put(None)
560
+
561
+ # tell workers there is no more work
562
+ util.debug('task handler sending sentinel to workers')
563
+ for p in pool:
564
+ put(None)
565
+ except OSError:
566
+ util.debug('task handler got OSError when sending sentinels')
567
+
568
+ util.debug('task handler exiting')
569
+
570
+ @staticmethod
571
+ def _handle_results(outqueue, get, cache):
572
+ thread = threading.current_thread()
573
+
574
+ while 1:
575
+ try:
576
+ task = get()
577
+ except (OSError, EOFError):
578
+ util.debug('result handler got EOFError/OSError -- exiting')
579
+ return
580
+
581
+ if thread._state != RUN:
582
+ assert thread._state == TERMINATE, "Thread not in TERMINATE"
583
+ util.debug('result handler found thread._state=TERMINATE')
584
+ break
585
+
586
+ if task is None:
587
+ util.debug('result handler got sentinel')
588
+ break
589
+
590
+ job, i, obj = task
591
+ try:
592
+ cache[job]._set(i, obj)
593
+ except KeyError:
594
+ pass
595
+ task = job = obj = None
596
+
597
+ while cache and thread._state != TERMINATE:
598
+ try:
599
+ task = get()
600
+ except (OSError, EOFError):
601
+ util.debug('result handler got EOFError/OSError -- exiting')
602
+ return
603
+
604
+ if task is None:
605
+ util.debug('result handler ignoring extra sentinel')
606
+ continue
607
+ job, i, obj = task
608
+ try:
609
+ cache[job]._set(i, obj)
610
+ except KeyError:
611
+ pass
612
+ task = job = obj = None
613
+
614
+ if hasattr(outqueue, '_reader'):
615
+ util.debug('ensuring that outqueue is not full')
616
+ # If we don't make room available in outqueue then
617
+ # attempts to add the sentinel (None) to outqueue may
618
+ # block. There is guaranteed to be no more than 2 sentinels.
619
+ try:
620
+ for i in range(10):
621
+ if not outqueue._reader.poll():
622
+ break
623
+ get()
624
+ except (OSError, EOFError):
625
+ pass
626
+
627
+ util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
628
+ len(cache), thread._state)
629
+
630
+ @staticmethod
631
+ def _get_tasks(func, it, size):
632
+ it = iter(it)
633
+ while 1:
634
+ x = tuple(itertools.islice(it, size))
635
+ if not x:
636
+ return
637
+ yield (func, x)
638
+
639
+ def __reduce__(self):
640
+ raise NotImplementedError(
641
+ 'pool objects cannot be passed between processes or pickled'
642
+ )
643
+
644
+ def close(self):
645
+ util.debug('closing pool')
646
+ if self._state == RUN:
647
+ self._state = CLOSE
648
+ self._worker_handler._state = CLOSE
649
+ self._change_notifier.put(None)
650
+
651
+ def terminate(self):
652
+ util.debug('terminating pool')
653
+ self._state = TERMINATE
654
+ self._terminate()
655
+
656
+ def join(self):
657
+ util.debug('joining pool')
658
+ if self._state == RUN:
659
+ raise ValueError("Pool is still running")
660
+ elif self._state not in (CLOSE, TERMINATE):
661
+ raise ValueError("In unknown state")
662
+ self._worker_handler.join()
663
+ self._task_handler.join()
664
+ self._result_handler.join()
665
+ for p in self._pool:
666
+ p.join()
667
+
668
+ @staticmethod
669
+ def _help_stuff_finish(inqueue, task_handler, size):
670
+ # task_handler may be blocked trying to put items on inqueue
671
+ util.debug('removing tasks from inqueue until task handler finished')
672
+ inqueue._rlock.acquire()
673
+ while task_handler.is_alive() and inqueue._reader.poll():
674
+ inqueue._reader.recv()
675
+ time.sleep(0)
676
+
677
+ @classmethod
678
+ def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier,
679
+ worker_handler, task_handler, result_handler, cache):
680
+ # this is guaranteed to only be called once
681
+ util.debug('finalizing pool')
682
+
683
+ # Notify that the worker_handler state has been changed so the
684
+ # _handle_workers loop can be unblocked (and exited) in order to
685
+ # send the finalization sentinel to all the workers.
686
+ worker_handler._state = TERMINATE
687
+ change_notifier.put(None)
688
+
689
+ task_handler._state = TERMINATE
690
+
691
+ util.debug('helping task handler/workers to finish')
692
+ cls._help_stuff_finish(inqueue, task_handler, len(pool))
693
+
694
+ if (not result_handler.is_alive()) and (len(cache) != 0):
695
+ raise AssertionError(
696
+ "Cannot have cache with result_handler not alive")
697
+
698
+ result_handler._state = TERMINATE
699
+ change_notifier.put(None)
700
+ outqueue.put(None) # sentinel
701
+
702
+ # We must wait for the worker handler to exit before terminating
703
+ # workers because we don't want workers to be restarted behind our back.
704
+ util.debug('joining worker handler')
705
+ if threading.current_thread() is not worker_handler:
706
+ worker_handler.join()
707
+
708
+ # Terminate workers which haven't already finished.
709
+ if pool and hasattr(pool[0], 'terminate'):
710
+ util.debug('terminating workers')
711
+ for p in pool:
712
+ if p.exitcode is None:
713
+ p.terminate()
714
+
715
+ util.debug('joining task handler')
716
+ if threading.current_thread() is not task_handler:
717
+ task_handler.join()
718
+
719
+ util.debug('joining result handler')
720
+ if threading.current_thread() is not result_handler:
721
+ result_handler.join()
722
+
723
+ if pool and hasattr(pool[0], 'terminate'):
724
+ util.debug('joining pool workers')
725
+ for p in pool:
726
+ if p.is_alive():
727
+ # worker has not yet exited
728
+ util.debug('cleaning up worker %d' % p.pid)
729
+ p.join()
730
+
731
+ def __enter__(self):
732
+ self._check_running()
733
+ return self
734
+
735
+ def __exit__(self, exc_type, exc_val, exc_tb):
736
+ self.terminate()
737
+
738
+ #
739
+ # Class whose instances are returned by `Pool.apply_async()`
740
+ #
741
+
742
+ class ApplyResult(object):
743
+
744
+ def __init__(self, pool, callback, error_callback):
745
+ self._pool = pool
746
+ self._event = threading.Event()
747
+ self._job = next(job_counter)
748
+ self._cache = pool._cache
749
+ self._callback = callback
750
+ self._error_callback = error_callback
751
+ self._cache[self._job] = self
752
+
753
+ def ready(self):
754
+ return self._event.is_set()
755
+
756
+ def successful(self):
757
+ if not self.ready():
758
+ raise ValueError("{0!r} not ready".format(self))
759
+ return self._success
760
+
761
+ def wait(self, timeout=None):
762
+ self._event.wait(timeout)
763
+
764
+ def get(self, timeout=None):
765
+ self.wait(timeout)
766
+ if not self.ready():
767
+ raise TimeoutError
768
+ if self._success:
769
+ return self._value
770
+ else:
771
+ raise self._value
772
+
773
+ def _set(self, i, obj):
774
+ self._success, self._value = obj
775
+ if self._callback and self._success:
776
+ self._callback(self._value)
777
+ if self._error_callback and not self._success:
778
+ self._error_callback(self._value)
779
+ self._event.set()
780
+ del self._cache[self._job]
781
+ self._pool = None
782
+
783
+ __class_getitem__ = classmethod(types.GenericAlias)
784
+
785
+ AsyncResult = ApplyResult # create alias -- see #17805
786
+
787
+ #
788
+ # Class whose instances are returned by `Pool.map_async()`
789
+ #
790
+
791
+ class MapResult(ApplyResult):
792
+
793
+ def __init__(self, pool, chunksize, length, callback, error_callback):
794
+ ApplyResult.__init__(self, pool, callback,
795
+ error_callback=error_callback)
796
+ self._success = True
797
+ self._value = [None] * length
798
+ self._chunksize = chunksize
799
+ if chunksize <= 0:
800
+ self._number_left = 0
801
+ self._event.set()
802
+ del self._cache[self._job]
803
+ else:
804
+ self._number_left = length//chunksize + bool(length % chunksize)
805
+
806
+ def _set(self, i, success_result):
807
+ self._number_left -= 1
808
+ success, result = success_result
809
+ if success and self._success:
810
+ self._value[i*self._chunksize:(i+1)*self._chunksize] = result
811
+ if self._number_left == 0:
812
+ if self._callback:
813
+ self._callback(self._value)
814
+ del self._cache[self._job]
815
+ self._event.set()
816
+ self._pool = None
817
+ else:
818
+ if not success and self._success:
819
+ # only store first exception
820
+ self._success = False
821
+ self._value = result
822
+ if self._number_left == 0:
823
+ # only consider the result ready once all jobs are done
824
+ if self._error_callback:
825
+ self._error_callback(self._value)
826
+ del self._cache[self._job]
827
+ self._event.set()
828
+ self._pool = None
829
+
830
+ #
831
+ # Class whose instances are returned by `Pool.imap()`
832
+ #
833
+
834
+ class IMapIterator(object):
835
+
836
+ def __init__(self, pool):
837
+ self._pool = pool
838
+ self._cond = threading.Condition(threading.Lock())
839
+ self._job = next(job_counter)
840
+ self._cache = pool._cache
841
+ self._items = collections.deque()
842
+ self._index = 0
843
+ self._length = None
844
+ self._unsorted = {}
845
+ self._cache[self._job] = self
846
+
847
+ def __iter__(self):
848
+ return self
849
+
850
+ def next(self, timeout=None):
851
+ with self._cond:
852
+ try:
853
+ item = self._items.popleft()
854
+ except IndexError:
855
+ if self._index == self._length:
856
+ self._pool = None
857
+ raise StopIteration from None
858
+ self._cond.wait(timeout)
859
+ try:
860
+ item = self._items.popleft()
861
+ except IndexError:
862
+ if self._index == self._length:
863
+ self._pool = None
864
+ raise StopIteration from None
865
+ raise TimeoutError from None
866
+
867
+ success, value = item
868
+ if success:
869
+ return value
870
+ raise value
871
+
872
+ __next__ = next # XXX
873
+
874
+ def _set(self, i, obj):
875
+ with self._cond:
876
+ if self._index == i:
877
+ self._items.append(obj)
878
+ self._index += 1
879
+ while self._index in self._unsorted:
880
+ obj = self._unsorted.pop(self._index)
881
+ self._items.append(obj)
882
+ self._index += 1
883
+ self._cond.notify()
884
+ else:
885
+ self._unsorted[i] = obj
886
+
887
+ if self._index == self._length:
888
+ del self._cache[self._job]
889
+ self._pool = None
890
+
891
+ def _set_length(self, length):
892
+ with self._cond:
893
+ self._length = length
894
+ if self._index == self._length:
895
+ self._cond.notify()
896
+ del self._cache[self._job]
897
+ self._pool = None
898
+
899
+ #
900
+ # Class whose instances are returned by `Pool.imap_unordered()`
901
+ #
902
+
903
+ class IMapUnorderedIterator(IMapIterator):
904
+
905
+ def _set(self, i, obj):
906
+ with self._cond:
907
+ self._items.append(obj)
908
+ self._index += 1
909
+ self._cond.notify()
910
+ if self._index == self._length:
911
+ del self._cache[self._job]
912
+ self._pool = None
913
+
914
+ #
915
+ #
916
+ #
917
+
918
+ class ThreadPool(Pool):
919
+ _wrap_exception = False
920
+
921
+ @staticmethod
922
+ def Process(ctx, *args, **kwds):
923
+ from .dummy import Process
924
+ return Process(*args, **kwds)
925
+
926
+ def __init__(self, processes=None, initializer=None, initargs=()):
927
+ Pool.__init__(self, processes, initializer, initargs)
928
+
929
+ def _setup_queues(self):
930
+ self._inqueue = queue.SimpleQueue()
931
+ self._outqueue = queue.SimpleQueue()
932
+ self._quick_put = self._inqueue.put
933
+ self._quick_get = self._outqueue.get
934
+
935
+ def _get_sentinels(self):
936
+ return [self._change_notifier._reader]
937
+
938
+ @staticmethod
939
+ def _get_worker_sentinels(workers):
940
+ return []
941
+
942
+ @staticmethod
943
+ def _help_stuff_finish(inqueue, task_handler, size):
944
+ # drain inqueue, and put sentinels at its head to make workers finish
945
+ try:
946
+ while True:
947
+ inqueue.get(block=False)
948
+ except queue.Empty:
949
+ pass
950
+ for i in range(size):
951
+ inqueue.put(None)
952
+
953
+ def _wait_for_updates(self, sentinels, change_notifier, timeout):
954
+ time.sleep(timeout)
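For orientation, a minimal sketch of the pool in use. Note from `__exit__` above that leaving the `with` block calls `terminate()`, so results must be consumed inside it:

from multiprocess import Pool

def square(x):  # must be defined at module level so workers can unpickle it
    return x * x

if __name__ == '__main__':
    with Pool(processes=4) as p:
        print(p.map(square, range(10)))                # eager, ordered
        for y in p.imap_unordered(square, range(10)):  # lazy, arbitrary order
            print(y)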
lib/python3.10/site-packages/multiprocess/popen_forkserver.py ADDED
@@ -0,0 +1,74 @@
1
+ import io
2
+ import os
3
+
4
+ from .context import reduction, set_spawning_popen
5
+ if not reduction.HAVE_SEND_HANDLE:
6
+ raise ImportError('No support for sending fds between processes')
7
+ from . import forkserver
8
+ from . import popen_fork
9
+ from . import spawn
10
+ from . import util
11
+
12
+
13
+ __all__ = ['Popen']
14
+
15
+ #
16
+ # Wrapper for an fd used while launching a process
17
+ #
18
+
19
+ class _DupFd(object):
20
+ def __init__(self, ind):
21
+ self.ind = ind
22
+ def detach(self):
23
+ return forkserver.get_inherited_fds()[self.ind]
24
+
25
+ #
26
+ # Start child process using a server process
27
+ #
28
+
29
+ class Popen(popen_fork.Popen):
30
+ method = 'forkserver'
31
+ DupFd = _DupFd
32
+
33
+ def __init__(self, process_obj):
34
+ self._fds = []
35
+ super().__init__(process_obj)
36
+
37
+ def duplicate_for_child(self, fd):
38
+ self._fds.append(fd)
39
+ return len(self._fds) - 1
40
+
41
+ def _launch(self, process_obj):
42
+ prep_data = spawn.get_preparation_data(process_obj._name)
43
+ buf = io.BytesIO()
44
+ set_spawning_popen(self)
45
+ try:
46
+ reduction.dump(prep_data, buf)
47
+ reduction.dump(process_obj, buf)
48
+ finally:
49
+ set_spawning_popen(None)
50
+
51
+ self.sentinel, w = forkserver.connect_to_new_process(self._fds)
52
+ # Keep a duplicate of the data pipe's write end as a sentinel of the
53
+ # parent process used by the child process.
54
+ _parent_w = os.dup(w)
55
+ self.finalizer = util.Finalize(self, util.close_fds,
56
+ (_parent_w, self.sentinel))
57
+ with open(w, 'wb', closefd=True) as f:
58
+ f.write(buf.getbuffer())
59
+ self.pid = forkserver.read_signed(self.sentinel)
60
+
61
+ def poll(self, flag=os.WNOHANG):
62
+ if self.returncode is None:
63
+ from multiprocess.connection import wait
64
+ timeout = 0 if flag == os.WNOHANG else None
65
+ if not wait([self.sentinel], timeout):
66
+ return None
67
+ try:
68
+ self.returncode = forkserver.read_signed(self.sentinel)
69
+ except (OSError, EOFError):
70
+ # This should not happen usually, but perhaps the forkserver
71
+ # process itself got killed
72
+ self.returncode = 255
73
+
74
+ return self.returncode
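A sketch of selecting this start method (POSIX only, and only where fd passing is available, per the ImportError guard above):

import multiprocess as mp

def task(x):
    return x + 1

if __name__ == '__main__':
    # Workers are forked from a small, already-initialized server process,
    # so per-process startup is cheap and parent state is not inherited.
    ctx = mp.get_context('forkserver')
    p = ctx.Process(target=task, args=(1,))
    p.start()
    p.join()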
lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py ADDED
@@ -0,0 +1,72 @@
1
+ import io
2
+ import os
3
+
4
+ from .context import reduction, set_spawning_popen
5
+ from . import popen_fork
6
+ from . import spawn
7
+ from . import util
8
+
9
+ __all__ = ['Popen']
10
+
11
+
12
+ #
13
+ # Wrapper for an fd used while launching a process
14
+ #
15
+
16
+ class _DupFd(object):
17
+ def __init__(self, fd):
18
+ self.fd = fd
19
+ def detach(self):
20
+ return self.fd
21
+
22
+ #
23
+ # Start child process using a fresh interpreter
24
+ #
25
+
26
+ class Popen(popen_fork.Popen):
27
+ method = 'spawn'
28
+ DupFd = _DupFd
29
+
30
+ def __init__(self, process_obj):
31
+ self._fds = []
32
+ super().__init__(process_obj)
33
+
34
+ def duplicate_for_child(self, fd):
35
+ self._fds.append(fd)
36
+ return fd
37
+
38
+ def _launch(self, process_obj):
39
+ from . import resource_tracker
40
+ tracker_fd = resource_tracker.getfd()
41
+ self._fds.append(tracker_fd)
42
+ prep_data = spawn.get_preparation_data(process_obj._name)
43
+ fp = io.BytesIO()
44
+ set_spawning_popen(self)
45
+ try:
46
+ reduction.dump(prep_data, fp)
47
+ reduction.dump(process_obj, fp)
48
+ finally:
49
+ set_spawning_popen(None)
50
+
51
+ parent_r = child_w = child_r = parent_w = None
52
+ try:
53
+ parent_r, child_w = os.pipe()
54
+ child_r, parent_w = os.pipe()
55
+ cmd = spawn.get_command_line(tracker_fd=tracker_fd,
56
+ pipe_handle=child_r)
57
+ self._fds.extend([child_r, child_w])
58
+ self.pid = util.spawnv_passfds(spawn.get_executable(),
59
+ cmd, self._fds)
60
+ self.sentinel = parent_r
61
+ with open(parent_w, 'wb', closefd=False) as f:
62
+ f.write(fp.getbuffer())
63
+ finally:
64
+ fds_to_close = []
65
+ for fd in (parent_r, parent_w):
66
+ if fd is not None:
67
+ fds_to_close.append(fd)
68
+ self.finalizer = util.Finalize(self, util.close_fds, fds_to_close)
69
+
70
+ for fd in (child_r, child_w):
71
+ if fd is not None:
72
+ os.close(fd)
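A sketch of the spawn method on POSIX. The child is a fresh interpreter fed `prep_data` and `process_obj` over the pipe written in `_launch()`, so `target` and its arguments must be picklable:

import multiprocess as mp

def greet(q):
    q.put('hello from the spawned child')

if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    q = ctx.SimpleQueue()
    p = ctx.Process(target=greet, args=(q,))
    p.start()
    print(q.get())
    p.join()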
lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py ADDED
@@ -0,0 +1,131 @@
1
+ import os
2
+ import msvcrt
3
+ import signal
4
+ import sys
5
+ import _winapi
6
+
7
+ from .context import reduction, get_spawning_popen, set_spawning_popen
8
+ from . import spawn
9
+ from . import util
10
+
11
+ __all__ = ['Popen']
12
+
13
+ #
14
+ #
15
+ #
16
+
17
+ TERMINATE = 0x10000
18
+ WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
19
+ WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
20
+
21
+
22
+ def _path_eq(p1, p2):
23
+ return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
24
+
25
+ WINENV = not _path_eq(sys.executable, sys._base_executable)
26
+
27
+
28
+ def _close_handles(*handles):
29
+ for handle in handles:
30
+ _winapi.CloseHandle(handle)
31
+
32
+
33
+ #
34
+ # We define a Popen class similar to the one from subprocess, but
35
+ # whose constructor takes a process object as its argument.
36
+ #
37
+
38
+ class Popen(object):
39
+ '''
40
+ Start a subprocess to run the code of a process object
41
+ '''
42
+ method = 'spawn'
43
+
44
+ def __init__(self, process_obj):
45
+ prep_data = spawn.get_preparation_data(process_obj._name)
46
+
47
+ # read end of pipe will be duplicated by the child process
48
+ # -- see spawn_main() in spawn.py.
49
+ #
50
+ # bpo-33929: Previously, the read end of pipe was "stolen" by the child
51
+ # process, but it leaked a handle if the child process had been
52
+ # terminated before it could steal the handle from the parent process.
53
+ rhandle, whandle = _winapi.CreatePipe(None, 0)
54
+ wfd = msvcrt.open_osfhandle(whandle, 0)
55
+ cmd = spawn.get_command_line(parent_pid=os.getpid(),
56
+ pipe_handle=rhandle)
57
+ cmd = ' '.join('"%s"' % x for x in cmd)
58
+
59
+ python_exe = spawn.get_executable()
60
+
61
+ # bpo-35797: When running in a venv, we bypass the redirect
62
+ # executor and launch our base Python.
63
+ if WINENV and _path_eq(python_exe, sys.executable):
64
+ python_exe = sys._base_executable
65
+ env = os.environ.copy()
66
+ env["__PYVENV_LAUNCHER__"] = sys.executable
67
+ else:
68
+ env = None
69
+
70
+ with open(wfd, 'wb', closefd=True) as to_child:
71
+ # start process
72
+ try:
73
+ hp, ht, pid, tid = _winapi.CreateProcess(
74
+ python_exe, cmd,
75
+ None, None, False, 0, env, None, None)
76
+ _winapi.CloseHandle(ht)
77
+ except:
78
+ _winapi.CloseHandle(rhandle)
79
+ raise
80
+
81
+ # set attributes of self
82
+ self.pid = pid
83
+ self.returncode = None
84
+ self._handle = hp
85
+ self.sentinel = int(hp)
86
+ self.finalizer = util.Finalize(self, _close_handles,
87
+ (self.sentinel, int(rhandle)))
88
+
89
+ # send information to child
90
+ set_spawning_popen(self)
91
+ try:
92
+ reduction.dump(prep_data, to_child)
93
+ reduction.dump(process_obj, to_child)
94
+ finally:
95
+ set_spawning_popen(None)
96
+
97
+ def duplicate_for_child(self, handle):
98
+ assert self is get_spawning_popen()
99
+ return reduction.duplicate(handle, self.sentinel)
100
+
101
+ def wait(self, timeout=None):
102
+ if self.returncode is None:
103
+ if timeout is None:
104
+ msecs = _winapi.INFINITE
105
+ else:
106
+ msecs = max(0, int(timeout * 1000 + 0.5))
107
+
108
+ res = _winapi.WaitForSingleObject(int(self._handle), msecs)
109
+ if res == _winapi.WAIT_OBJECT_0:
110
+ code = _winapi.GetExitCodeProcess(self._handle)
111
+ if code == TERMINATE:
112
+ code = -signal.SIGTERM
113
+ self.returncode = code
114
+
115
+ return self.returncode
116
+
117
+ def poll(self):
118
+ return self.wait(timeout=0)
119
+
120
+ def terminate(self):
121
+ if self.returncode is None:
122
+ try:
123
+ _winapi.TerminateProcess(int(self._handle), TERMINATE)
124
+ except OSError:
125
+ if self.wait(timeout=1.0) is None:
126
+ raise
127
+
128
+ kill = terminate
129
+
130
+ def close(self):
131
+ self.finalizer()
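
Illustrative sketch (not part of the diff): the Popen class above backs the
'spawn' start method on Windows; user code reaches it through a context rather
than instantiating it directly. A minimal, hedged example, assuming the
multiprocess package is importable:

    import multiprocess as mp

    def hello():
        print("hello from pid", mp.current_process().pid)

    if __name__ == '__main__':          # required under the spawn start method
        ctx = mp.get_context('spawn')
        p = ctx.Process(target=hello)   # backed by popen_spawn_win32.Popen on Windows
        p.start()                       # pickles the process object to the child
        p.join()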
lib/python3.10/site-packages/multiprocess/process.py ADDED
@@ -0,0 +1,432 @@
+ #
+ # Module providing the `Process` class which emulates `threading.Thread`
+ #
+ # multiprocessing/process.py
+ #
+ # Copyright (c) 2006-2008, R Oudkerk
+ # Licensed to PSF under a Contributor Agreement.
+ #
+
+ __all__ = ['BaseProcess', 'current_process', 'active_children',
+            'parent_process']
+
+ #
+ # Imports
+ #
+
+ import os
+ import sys
+ import signal
+ import itertools
+ import threading
+ from _weakrefset import WeakSet
+
+ #
+ #
+ #
+
+ try:
+     ORIGINAL_DIR = os.path.abspath(os.getcwd())
+ except OSError:
+     ORIGINAL_DIR = None
+
+ #
+ # Public functions
+ #
+
+ def current_process():
+     '''
+     Return process object representing the current process
+     '''
+     return _current_process
+
+ def active_children():
+     '''
+     Return list of process objects corresponding to live child processes
+     '''
+     _cleanup()
+     return list(_children)
+
+
+ def parent_process():
+     '''
+     Return process object representing the parent process
+     '''
+     return _parent_process
+
+ #
+ #
+ #
+
+ def _cleanup():
+     # check for processes which have finished
+     for p in list(_children):
+         if p._popen.poll() is not None:
+             _children.discard(p)
+
+ #
+ # The `Process` class
+ #
+
+ class BaseProcess(object):
+     '''
+     Process objects represent activity that is run in a separate process
+
+     The class is analogous to `threading.Thread`
+     '''
+     def _Popen(self):
+         raise NotImplementedError
+
+     def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
+                  *, daemon=None):
+         assert group is None, 'group argument must be None for now'
+         count = next(_process_counter)
+         self._identity = _current_process._identity + (count,)
+         self._config = _current_process._config.copy()
+         self._parent_pid = os.getpid()
+         self._parent_name = _current_process.name
+         self._popen = None
+         self._closed = False
+         self._target = target
+         self._args = tuple(args)
+         self._kwargs = dict(kwargs)
+         self._name = name or type(self).__name__ + '-' + \
+                      ':'.join(str(i) for i in self._identity)
+         if daemon is not None:
+             self.daemon = daemon
+         _dangling.add(self)
+
+     def _check_closed(self):
+         if self._closed:
+             raise ValueError("process object is closed")
+
+     def run(self):
+         '''
+         Method to be run in sub-process; can be overridden in sub-class
+         '''
+         if self._target:
+             self._target(*self._args, **self._kwargs)
+
+     def start(self):
+         '''
+         Start child process
+         '''
+         self._check_closed()
+         assert self._popen is None, 'cannot start a process twice'
+         assert self._parent_pid == os.getpid(), \
+                'can only start a process object created by current process'
+         assert not _current_process._config.get('daemon'), \
+                'daemonic processes are not allowed to have children'
+         _cleanup()
+         self._popen = self._Popen(self)
+         self._sentinel = self._popen.sentinel
+         # Avoid a refcycle if the target function holds an indirect
+         # reference to the process object (see bpo-30775)
+         del self._target, self._args, self._kwargs
+         _children.add(self)
+
+     def terminate(self):
+         '''
+         Terminate process; sends SIGTERM signal or uses TerminateProcess()
+         '''
+         self._check_closed()
+         self._popen.terminate()
+
+     def kill(self):
+         '''
+         Terminate process; sends SIGKILL signal or uses TerminateProcess()
+         '''
+         self._check_closed()
+         self._popen.kill()
+
+     def join(self, timeout=None):
+         '''
+         Wait until child process terminates
+         '''
+         self._check_closed()
+         assert self._parent_pid == os.getpid(), 'can only join a child process'
+         assert self._popen is not None, 'can only join a started process'
+         res = self._popen.wait(timeout)
+         if res is not None:
+             _children.discard(self)
+
+     def is_alive(self):
+         '''
+         Return whether process is alive
+         '''
+         self._check_closed()
+         if self is _current_process:
+             return True
+         assert self._parent_pid == os.getpid(), 'can only test a child process'
+
+         if self._popen is None:
+             return False
+
+         returncode = self._popen.poll()
+         if returncode is None:
+             return True
+         else:
+             _children.discard(self)
+             return False
+
+     def close(self):
+         '''
+         Close the Process object.
+
+         This method releases resources held by the Process object.  It is
+         an error to call this method if the child process is still running.
+         '''
+         if self._popen is not None:
+             if self._popen.poll() is None:
+                 raise ValueError("Cannot close a process while it is still running. "
+                                  "You should first call join() or terminate().")
+             self._popen.close()
+             self._popen = None
+             del self._sentinel
+             _children.discard(self)
+         self._closed = True
+
+     @property
+     def name(self):
+         return self._name
+
+     @name.setter
+     def name(self, name):
+         assert isinstance(name, str), 'name must be a string'
+         self._name = name
+
+     @property
+     def daemon(self):
+         '''
+         Return whether process is a daemon
+         '''
+         return self._config.get('daemon', False)
+
+     @daemon.setter
+     def daemon(self, daemonic):
+         '''
+         Set whether process is a daemon
+         '''
+         assert self._popen is None, 'process has already started'
+         self._config['daemon'] = daemonic
+
+     @property
+     def authkey(self):
+         return self._config['authkey']
+
+     @authkey.setter
+     def authkey(self, authkey):
+         '''
+         Set authorization key of process
+         '''
+         self._config['authkey'] = AuthenticationString(authkey)
+
+     @property
+     def exitcode(self):
+         '''
+         Return exit code of process or `None` if it has yet to stop
+         '''
+         self._check_closed()
+         if self._popen is None:
+             return self._popen
+         return self._popen.poll()
+
+     @property
+     def ident(self):
+         '''
+         Return identifier (PID) of process or `None` if it has yet to start
+         '''
+         self._check_closed()
+         if self is _current_process:
+             return os.getpid()
+         else:
+             return self._popen and self._popen.pid
+
+     pid = ident
+
+     @property
+     def sentinel(self):
+         '''
+         Return a file descriptor (Unix) or handle (Windows) suitable for
+         waiting for process termination.
+         '''
+         self._check_closed()
+         try:
+             return self._sentinel
+         except AttributeError:
+             raise ValueError("process not started") from None
+
+     def __repr__(self):
+         exitcode = None
+         if self is _current_process:
+             status = 'started'
+         elif self._closed:
+             status = 'closed'
+         elif self._parent_pid != os.getpid():
+             status = 'unknown'
+         elif self._popen is None:
+             status = 'initial'
+         else:
+             exitcode = self._popen.poll()
+             if exitcode is not None:
+                 status = 'stopped'
+             else:
+                 status = 'started'
+
+         info = [type(self).__name__, 'name=%r' % self._name]
+         if self._popen is not None:
+             info.append('pid=%s' % self._popen.pid)
+         info.append('parent=%s' % self._parent_pid)
+         info.append(status)
+         if exitcode is not None:
+             exitcode = _exitcode_to_name.get(exitcode, exitcode)
+             info.append('exitcode=%s' % exitcode)
+         if self.daemon:
+             info.append('daemon')
+         return '<%s>' % ' '.join(info)
+
+     ##
+
+     def _bootstrap(self, parent_sentinel=None):
+         from . import util, context
+         global _current_process, _parent_process, _process_counter, _children
+
+         try:
+             if self._start_method is not None:
+                 context._force_start_method(self._start_method)
+             _process_counter = itertools.count(1)
+             _children = set()
+             util._close_stdin()
+             old_process = _current_process
+             _current_process = self
+             _parent_process = _ParentProcess(
+                 self._parent_name, self._parent_pid, parent_sentinel)
+             if threading._HAVE_THREAD_NATIVE_ID:
+                 threading.main_thread()._set_native_id()
+             try:
+                 util._finalizer_registry.clear()
+                 util._run_after_forkers()
+             finally:
+                 # delay finalization of the old process object until after
+                 # _run_after_forkers() is executed
+                 del old_process
+             util.info('child process calling self.run()')
+             try:
+                 self.run()
+                 exitcode = 0
+             finally:
+                 util._exit_function()
+         except SystemExit as e:
+             if e.code is None:
+                 exitcode = 0
+             elif isinstance(e.code, int):
+                 exitcode = e.code
+             else:
+                 sys.stderr.write(str(e.code) + '\n')
+                 exitcode = 1
+         except:
+             exitcode = 1
+             import traceback
+             sys.stderr.write('Process %s:\n' % self.name)
+             traceback.print_exc()
+         finally:
+             threading._shutdown()
+             util.info('process exiting with exitcode %d' % exitcode)
+             util._flush_std_streams()
+
+         return exitcode
+
+ #
+ # We subclass bytes to avoid accidental transmission of auth keys over network
+ #
+
+ class AuthenticationString(bytes):
+     def __reduce__(self):
+         from .context import get_spawning_popen
+         if get_spawning_popen() is None:
+             raise TypeError(
+                 'Pickling an AuthenticationString object is '
+                 'disallowed for security reasons'
+             )
+         return AuthenticationString, (bytes(self),)
+
+
+ #
+ # Create object representing the parent process
+ #
+
+ class _ParentProcess(BaseProcess):
+
+     def __init__(self, name, pid, sentinel):
+         self._identity = ()
+         self._name = name
+         self._pid = pid
+         self._parent_pid = None
+         self._popen = None
+         self._closed = False
+         self._sentinel = sentinel
+         self._config = {}
+
+     def is_alive(self):
+         from multiprocessing.connection import wait
+         return not wait([self._sentinel], timeout=0)
+
+     @property
+     def ident(self):
+         return self._pid
+
+     def join(self, timeout=None):
+         '''
+         Wait until parent process terminates
+         '''
+         from multiprocessing.connection import wait
+         wait([self._sentinel], timeout=timeout)
+
+     pid = ident
+
+ #
+ # Create object representing the main process
+ #
+
+ class _MainProcess(BaseProcess):
+
+     def __init__(self):
+         self._identity = ()
+         self._name = 'MainProcess'
+         self._parent_pid = None
+         self._popen = None
+         self._closed = False
+         self._config = {'authkey': AuthenticationString(os.urandom(32)),
+                         'semprefix': '/mp'}
+         # Note that some versions of FreeBSD only allow named
+         # semaphores to have names of up to 14 characters.  Therefore
+         # we choose a short prefix.
+         #
+         # On MacOSX in a sandbox it may be necessary to use a
+         # different prefix -- see #19478.
+         #
+         # Everything in self._config will be inherited by descendant
+         # processes.
+
+     def close(self):
+         pass
+
+
+ _parent_process = None
+ _current_process = _MainProcess()
+ _process_counter = itertools.count(1)
+ _children = set()
+ del _MainProcess
+
+ #
+ # Give names to some return codes
+ #
+
+ _exitcode_to_name = {}
+
+ for name, signum in list(signal.__dict__.items()):
+     if name[:3]=='SIG' and '_' not in name:
+         _exitcode_to_name[-signum] = f'-{name}'
+
+ # For debug and leak testing
+ _dangling = WeakSet()
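
A minimal usage sketch of the BaseProcess API above (illustrative, not part of
the diff); mp.Process is the concrete BaseProcess subclass exposed by the
package:

    import multiprocess as mp

    def work(x):
        print(mp.current_process().name, "got", x)

    if __name__ == '__main__':
        p = mp.Process(target=work, args=(42,))
        p.start()
        p.join()            # reaps the child and records its exit code
        print(p.exitcode)   # 0 on a clean exit, -SIGTERM if terminated
        p.close()           # releases the Popen handle; p is unusable afterwards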
lib/python3.10/site-packages/multiprocess/queues.py ADDED
@@ -0,0 +1,383 @@
+ #
+ # Module implementing queues
+ #
+ # multiprocessing/queues.py
+ #
+ # Copyright (c) 2006-2008, R Oudkerk
+ # Licensed to PSF under a Contributor Agreement.
+ #
+
+ __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
+
+ import sys
+ import os
+ import threading
+ import collections
+ import time
+ import types
+ import weakref
+ import errno
+
+ from queue import Empty, Full
+
+ try:
+     import _multiprocess as _multiprocessing
+ except ImportError:
+     import _multiprocessing
+
+ from . import connection
+ from . import context
+ _ForkingPickler = context.reduction.ForkingPickler
+
+ from .util import debug, info, Finalize, register_after_fork, is_exiting
+
+ #
+ # Queue type using a pipe, buffer and thread
+ #
+
+ class Queue(object):
+
+     def __init__(self, maxsize=0, *, ctx):
+         if maxsize <= 0:
+             # Can raise ImportError (see issues #3770 and #23400)
+             from .synchronize import SEM_VALUE_MAX as maxsize
+         self._maxsize = maxsize
+         self._reader, self._writer = connection.Pipe(duplex=False)
+         self._rlock = ctx.Lock()
+         self._opid = os.getpid()
+         if sys.platform == 'win32':
+             self._wlock = None
+         else:
+             self._wlock = ctx.Lock()
+         self._sem = ctx.BoundedSemaphore(maxsize)
+         # For use by concurrent.futures
+         self._ignore_epipe = False
+         self._reset()
+
+         if sys.platform != 'win32':
+             register_after_fork(self, Queue._after_fork)
+
+     def __getstate__(self):
+         context.assert_spawning(self)
+         return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
+                 self._rlock, self._wlock, self._sem, self._opid)
+
+     def __setstate__(self, state):
+         (self._ignore_epipe, self._maxsize, self._reader, self._writer,
+          self._rlock, self._wlock, self._sem, self._opid) = state
+         self._reset()
+
+     def _after_fork(self):
+         debug('Queue._after_fork()')
+         self._reset(after_fork=True)
+
+     def _reset(self, after_fork=False):
+         if after_fork:
+             self._notempty._at_fork_reinit()
+         else:
+             self._notempty = threading.Condition(threading.Lock())
+         self._buffer = collections.deque()
+         self._thread = None
+         self._jointhread = None
+         self._joincancelled = False
+         self._closed = False
+         self._close = None
+         self._send_bytes = self._writer.send_bytes
+         self._recv_bytes = self._reader.recv_bytes
+         self._poll = self._reader.poll
+
+     def put(self, obj, block=True, timeout=None):
+         if self._closed:
+             raise ValueError(f"Queue {self!r} is closed")
+         if not self._sem.acquire(block, timeout):
+             raise Full
+
+         with self._notempty:
+             if self._thread is None:
+                 self._start_thread()
+             self._buffer.append(obj)
+             self._notempty.notify()
+
+     def get(self, block=True, timeout=None):
+         if self._closed:
+             raise ValueError(f"Queue {self!r} is closed")
+         if block and timeout is None:
+             with self._rlock:
+                 res = self._recv_bytes()
+             self._sem.release()
+         else:
+             if block:
+                 deadline = getattr(time,'monotonic',time.time)() + timeout
+             if not self._rlock.acquire(block, timeout):
+                 raise Empty
+             try:
+                 if block:
+                     timeout = deadline - getattr(time,'monotonic',time.time)()
+                     if not self._poll(timeout):
+                         raise Empty
+                 elif not self._poll():
+                     raise Empty
+                 res = self._recv_bytes()
+                 self._sem.release()
+             finally:
+                 self._rlock.release()
+         # unserialize the data after having released the lock
+         return _ForkingPickler.loads(res)
+
+     def qsize(self):
+         # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
+         return self._maxsize - self._sem._semlock._get_value()
+
+     def empty(self):
+         return not self._poll()
+
+     def full(self):
+         return self._sem._semlock._is_zero()
+
+     def get_nowait(self):
+         return self.get(False)
+
+     def put_nowait(self, obj):
+         return self.put(obj, False)
+
+     def close(self):
+         self._closed = True
+         try:
+             self._reader.close()
+         finally:
+             close = self._close
+             if close:
+                 self._close = None
+                 close()
+
+     def join_thread(self):
+         debug('Queue.join_thread()')
+         assert self._closed, "Queue {0!r} not closed".format(self)
+         if self._jointhread:
+             self._jointhread()
+
+     def cancel_join_thread(self):
+         debug('Queue.cancel_join_thread()')
+         self._joincancelled = True
+         try:
+             self._jointhread.cancel()
+         except AttributeError:
+             pass
+
+     def _start_thread(self):
+         debug('Queue._start_thread()')
+
+         # Start thread which transfers data from buffer to pipe
+         self._buffer.clear()
+         self._thread = threading.Thread(
+             target=Queue._feed,
+             args=(self._buffer, self._notempty, self._send_bytes,
+                   self._wlock, self._writer.close, self._ignore_epipe,
+                   self._on_queue_feeder_error, self._sem),
+             name='QueueFeederThread'
+         )
+         self._thread.daemon = True
+
+         debug('doing self._thread.start()')
+         self._thread.start()
+         debug('... done self._thread.start()')
+
+         if not self._joincancelled:
+             self._jointhread = Finalize(
+                 self._thread, Queue._finalize_join,
+                 [weakref.ref(self._thread)],
+                 exitpriority=-5
+             )
+
+         # Send sentinel to the thread queue object when garbage collected
+         self._close = Finalize(
+             self, Queue._finalize_close,
+             [self._buffer, self._notempty],
+             exitpriority=10
+         )
+
+     @staticmethod
+     def _finalize_join(twr):
+         debug('joining queue thread')
+         thread = twr()
+         if thread is not None:
+             thread.join()
+             debug('... queue thread joined')
+         else:
+             debug('... queue thread already dead')
+
+     @staticmethod
+     def _finalize_close(buffer, notempty):
+         debug('telling queue thread to quit')
+         with notempty:
+             buffer.append(_sentinel)
+             notempty.notify()
+
+     @staticmethod
+     def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe,
+               onerror, queue_sem):
+         debug('starting thread to feed data to pipe')
+         nacquire = notempty.acquire
+         nrelease = notempty.release
+         nwait = notempty.wait
+         bpopleft = buffer.popleft
+         sentinel = _sentinel
+         if sys.platform != 'win32':
+             wacquire = writelock.acquire
+             wrelease = writelock.release
+         else:
+             wacquire = None
+
+         while 1:
+             try:
+                 nacquire()
+                 try:
+                     if not buffer:
+                         nwait()
+                 finally:
+                     nrelease()
+                 try:
+                     while 1:
+                         obj = bpopleft()
+                         if obj is sentinel:
+                             debug('feeder thread got sentinel -- exiting')
+                             close()
+                             return
+
+                         # serialize the data before acquiring the lock
+                         obj = _ForkingPickler.dumps(obj)
+                         if wacquire is None:
+                             send_bytes(obj)
+                         else:
+                             wacquire()
+                             try:
+                                 send_bytes(obj)
+                             finally:
+                                 wrelease()
+                 except IndexError:
+                     pass
+             except Exception as e:
+                 if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
+                     return
+                 # Since this runs in a daemon thread the resources it uses
+                 # may become unusable while the process is cleaning up.
+                 # We ignore errors which happen after the process has
+                 # started to cleanup.
+                 if is_exiting():
+                     info('error in queue thread: %s', e)
+                     return
+                 else:
+                     # Since the object has not been sent in the queue, we need
+                     # to decrease the size of the queue. The error acts as
+                     # if the object had been silently removed from the queue
+                     # and this step is necessary to have a properly working
+                     # queue.
+                     queue_sem.release()
+                     onerror(e, obj)
+
+     @staticmethod
+     def _on_queue_feeder_error(e, obj):
+         """
+         Private API hook called when feeding data in the background thread
+         raises an exception.  For overriding by concurrent.futures.
+         """
+         import traceback
+         traceback.print_exc()
+
+
+ _sentinel = object()
+
+ #
+ # A queue type which also supports join() and task_done() methods
+ #
+ # Note that if you do not call task_done() for each finished task then
+ # eventually the counter's semaphore may overflow causing Bad Things
+ # to happen.
+ #
+
+ class JoinableQueue(Queue):
+
+     def __init__(self, maxsize=0, *, ctx):
+         Queue.__init__(self, maxsize, ctx=ctx)
+         self._unfinished_tasks = ctx.Semaphore(0)
+         self._cond = ctx.Condition()
+
+     def __getstate__(self):
+         return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
+
+     def __setstate__(self, state):
+         Queue.__setstate__(self, state[:-2])
+         self._cond, self._unfinished_tasks = state[-2:]
+
+     def put(self, obj, block=True, timeout=None):
+         if self._closed:
+             raise ValueError(f"Queue {self!r} is closed")
+         if not self._sem.acquire(block, timeout):
+             raise Full
+
+         with self._notempty, self._cond:
+             if self._thread is None:
+                 self._start_thread()
+             self._buffer.append(obj)
+             self._unfinished_tasks.release()
+             self._notempty.notify()
+
+     def task_done(self):
+         with self._cond:
+             if not self._unfinished_tasks.acquire(False):
+                 raise ValueError('task_done() called too many times')
+             if self._unfinished_tasks._semlock._is_zero():
+                 self._cond.notify_all()
+
+     def join(self):
+         with self._cond:
+             if not self._unfinished_tasks._semlock._is_zero():
+                 self._cond.wait()
+
+ #
+ # Simplified Queue type -- really just a locked pipe
+ #
+
+ class SimpleQueue(object):
+
+     def __init__(self, *, ctx):
+         self._reader, self._writer = connection.Pipe(duplex=False)
+         self._rlock = ctx.Lock()
+         self._poll = self._reader.poll
+         if sys.platform == 'win32':
+             self._wlock = None
+         else:
+             self._wlock = ctx.Lock()
+
+     def close(self):
+         self._reader.close()
+         self._writer.close()
+
+     def empty(self):
+         return not self._poll()
+
+     def __getstate__(self):
+         context.assert_spawning(self)
+         return (self._reader, self._writer, self._rlock, self._wlock)
+
+     def __setstate__(self, state):
+         (self._reader, self._writer, self._rlock, self._wlock) = state
+         self._poll = self._reader.poll
+
+     def get(self):
+         with self._rlock:
+             res = self._reader.recv_bytes()
+         # unserialize the data after having released the lock
+         return _ForkingPickler.loads(res)
+
+     def put(self, obj):
+         # serialize the data before acquiring the lock
+         obj = _ForkingPickler.dumps(obj)
+         if self._wlock is None:
+             # writes to a message oriented win32 pipe are atomic
+             self._writer.send_bytes(obj)
+         else:
+             with self._wlock:
+                 self._writer.send_bytes(obj)
+
+     __class_getitem__ = classmethod(types.GenericAlias)
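
As the feeder-thread code above shows, put() only appends to an in-process
buffer; a daemon QueueFeederThread pickles each object and writes it to the
pipe. A short sketch (illustrative, not part of the diff):

    import multiprocess as mp

    def consumer(q):
        while True:
            item = q.get()        # unpickled after the read lock is released
            if item is None:      # None used here as a stop sentinel
                break
            print("consumed", item)

    if __name__ == '__main__':
        q = mp.Queue()
        p = mp.Process(target=consumer, args=(q,))
        p.start()
        for i in range(3):
            q.put(i)              # buffered, then fed to the pipe in the background
        q.put(None)
        p.join()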
lib/python3.10/site-packages/multiprocess/resource_sharer.py ADDED
@@ -0,0 +1,154 @@
+ #
+ # We use a background thread for sharing fds on Unix, and for sharing sockets on
+ # Windows.
+ #
+ # A client which wants to pickle a resource registers it with the resource
+ # sharer and gets an identifier in return.  The unpickling process then
+ # connects to the resource sharer, sends the identifier and its pid, and
+ # receives the resource.
+ #
+
+ import os
+ import signal
+ import socket
+ import sys
+ import threading
+
+ from . import process
+ from .context import reduction
+ from . import util
+
+ __all__ = ['stop']
+
+
+ if sys.platform == 'win32':
+     __all__ += ['DupSocket']
+
+     class DupSocket(object):
+         '''Picklable wrapper for a socket.'''
+         def __init__(self, sock):
+             new_sock = sock.dup()
+             def send(conn, pid):
+                 share = new_sock.share(pid)
+                 conn.send_bytes(share)
+             self._id = _resource_sharer.register(send, new_sock.close)
+
+         def detach(self):
+             '''Get the socket.  This should only be called once.'''
+             with _resource_sharer.get_connection(self._id) as conn:
+                 share = conn.recv_bytes()
+                 return socket.fromshare(share)
+
+ else:
+     __all__ += ['DupFd']
+
+     class DupFd(object):
+         '''Wrapper for fd which can be used at any time.'''
+         def __init__(self, fd):
+             new_fd = os.dup(fd)
+             def send(conn, pid):
+                 reduction.send_handle(conn, new_fd, pid)
+             def close():
+                 os.close(new_fd)
+             self._id = _resource_sharer.register(send, close)
+
+         def detach(self):
+             '''Get the fd.  This should only be called once.'''
+             with _resource_sharer.get_connection(self._id) as conn:
+                 return reduction.recv_handle(conn)
+
+
+ class _ResourceSharer(object):
+     '''Manager for resources using background thread.'''
+     def __init__(self):
+         self._key = 0
+         self._cache = {}
+         self._lock = threading.Lock()
+         self._listener = None
+         self._address = None
+         self._thread = None
+         util.register_after_fork(self, _ResourceSharer._afterfork)
+
+     def register(self, send, close):
+         '''Register resource, returning an identifier.'''
+         with self._lock:
+             if self._address is None:
+                 self._start()
+             self._key += 1
+             self._cache[self._key] = (send, close)
+             return (self._address, self._key)
+
+     @staticmethod
+     def get_connection(ident):
+         '''Return connection from which to receive identified resource.'''
+         from .connection import Client
+         address, key = ident
+         c = Client(address, authkey=process.current_process().authkey)
+         c.send((key, os.getpid()))
+         return c
+
+     def stop(self, timeout=None):
+         '''Stop the background thread and clear registered resources.'''
+         from .connection import Client
+         with self._lock:
+             if self._address is not None:
+                 c = Client(self._address,
+                            authkey=process.current_process().authkey)
+                 c.send(None)
+                 c.close()
+                 self._thread.join(timeout)
+                 if self._thread.is_alive():
+                     util.sub_warning('_ResourceSharer thread did '
+                                      'not stop when asked')
+                 self._listener.close()
+                 self._thread = None
+                 self._address = None
+                 self._listener = None
+                 for key, (send, close) in self._cache.items():
+                     close()
+                 self._cache.clear()
+
+     def _afterfork(self):
+         for key, (send, close) in self._cache.items():
+             close()
+         self._cache.clear()
+         self._lock._at_fork_reinit()
+         if self._listener is not None:
+             self._listener.close()
+         self._listener = None
+         self._address = None
+         self._thread = None
+
+     def _start(self):
+         from .connection import Listener
+         assert self._listener is None, "Already have Listener"
+         util.debug('starting listener and thread for sending handles')
+         self._listener = Listener(authkey=process.current_process().authkey)
+         self._address = self._listener.address
+         t = threading.Thread(target=self._serve)
+         t.daemon = True
+         t.start()
+         self._thread = t
+
+     def _serve(self):
+         if hasattr(signal, 'pthread_sigmask'):
+             signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
+         while 1:
+             try:
+                 with self._listener.accept() as conn:
+                     msg = conn.recv()
+                     if msg is None:
+                         break
+                     key, destination_pid = msg
+                     send, close = self._cache.pop(key)
+                     try:
+                         send(conn, destination_pid)
+                     finally:
+                         close()
+             except:
+                 if not util.is_exiting():
+                     sys.excepthook(*sys.exc_info())
+
+
+ _resource_sharer = _ResourceSharer()
+ stop = _resource_sharer.stop
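
A hedged, Unix-only sketch of the register/detach round trip described in the
header comment (illustrative, not part of the diff; DupFd only exists off
Windows, and detach() must be called exactly once):

    import os
    from multiprocess.resource_sharer import DupFd

    r, w = os.pipe()
    dup = DupFd(r)           # dup()s r and registers it with the sharer thread
    fd = dup.detach()        # connects to the listener and receives the fd
    os.write(w, b"hi")
    print(os.read(fd, 2))    # b'hi' -- fd is a working duplicate of r
    for x in (r, w, fd):
        os.close(x)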
lib/python3.10/site-packages/multiprocess/resource_tracker.py ADDED
@@ -0,0 +1,234 @@
+ ###############################################################################
+ # Server process to keep track of unlinked resources (like shared memory
+ # segments, semaphores etc.) and clean them.
+ #
+ # On Unix we run a server process which keeps track of unlinked
+ # resources. The server ignores SIGINT and SIGTERM and reads from a
+ # pipe.  Every other process of the program has a copy of the writable
+ # end of the pipe, so we get EOF when all other processes have exited.
+ # Then the server process unlinks any remaining resource names.
+ #
+ # This is important because there may be system limits for such resources: for
+ # instance, the system only supports a limited number of named semaphores, and
+ # shared-memory segments live in the RAM. If a python process leaks such a
+ # resource, this resource will not be removed till the next reboot.  Without
+ # this resource tracker process, "killall python" would probably leave unlinked
+ # resources.
+
+ import os
+ import signal
+ import sys
+ import threading
+ import warnings
+
+ from . import spawn
+ from . import util
+
+ __all__ = ['ensure_running', 'register', 'unregister']
+
+ _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
+ _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
+
+ _CLEANUP_FUNCS = {
+     'noop': lambda: None,
+ }
+
+ if os.name == 'posix':
+     try:
+         import _multiprocess as _multiprocessing
+     except ImportError:
+         import _multiprocessing
+     import _posixshmem
+
+     _CLEANUP_FUNCS.update({
+         'semaphore': _multiprocessing.sem_unlink,
+         'shared_memory': _posixshmem.shm_unlink,
+     })
+
+
+ class ResourceTracker(object):
+
+     def __init__(self):
+         self._lock = threading.Lock()
+         self._fd = None
+         self._pid = None
+
+     def _stop(self):
+         with self._lock:
+             if self._fd is None:
+                 # not running
+                 return
+
+             # closing the "alive" file descriptor stops main()
+             os.close(self._fd)
+             self._fd = None
+
+             os.waitpid(self._pid, 0)
+             self._pid = None
+
+     def getfd(self):
+         self.ensure_running()
+         return self._fd
+
+     def ensure_running(self):
+         '''Make sure that resource tracker process is running.
+
+         This can be run from any process.  Usually a child process will use
+         the resource created by its parent.'''
+         with self._lock:
+             if self._fd is not None:
+                 # resource tracker was launched before, is it still running?
+                 if self._check_alive():
+                     # => still alive
+                     return
+                 # => dead, launch it again
+                 os.close(self._fd)
+
+                 # Clean-up to avoid dangling processes.
+                 try:
+                     # _pid can be None if this process is a child from another
+                     # python process, which has started the resource_tracker.
+                     if self._pid is not None:
+                         os.waitpid(self._pid, 0)
+                 except ChildProcessError:
+                     # The resource_tracker has already been terminated.
+                     pass
+                 self._fd = None
+                 self._pid = None
+
+                 warnings.warn('resource_tracker: process died unexpectedly, '
+                               'relaunching.  Some resources might leak.')
+
+             fds_to_pass = []
+             try:
+                 fds_to_pass.append(sys.stderr.fileno())
+             except Exception:
+                 pass
+             cmd = 'from multiprocess.resource_tracker import main;main(%d)'
+             r, w = os.pipe()
+             try:
+                 fds_to_pass.append(r)
+                 # the process will outlive us, so no need to wait on pid
+                 exe = spawn.get_executable()
+                 args = [exe] + util._args_from_interpreter_flags()
+                 args += ['-c', cmd % r]
+                 # bpo-33613: Register a signal mask that will block the signals.
+                 # This signal mask will be inherited by the child that is going
+                 # to be spawned and will protect the child from a race condition
+                 # that can make the child die before it registers signal handlers
+                 # for SIGINT and SIGTERM. The mask is unregistered after spawning
+                 # the child.
+                 try:
+                     if _HAVE_SIGMASK:
+                         signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
+                     pid = util.spawnv_passfds(exe, args, fds_to_pass)
+                 finally:
+                     if _HAVE_SIGMASK:
+                         signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
+             except:
+                 os.close(w)
+                 raise
+             else:
+                 self._fd = w
+                 self._pid = pid
+             finally:
+                 os.close(r)
+
+     def _check_alive(self):
+         '''Check that the pipe has not been closed by sending a probe.'''
+         try:
+             # We cannot use send here as it calls ensure_running, creating
+             # a cycle.
+             os.write(self._fd, b'PROBE:0:noop\n')
+         except OSError:
+             return False
+         else:
+             return True
+
+     def register(self, name, rtype):
+         '''Register name of resource with resource tracker.'''
+         self._send('REGISTER', name, rtype)
+
+     def unregister(self, name, rtype):
+         '''Unregister name of resource with resource tracker.'''
+         self._send('UNREGISTER', name, rtype)
+
+     def _send(self, cmd, name, rtype):
+         self.ensure_running()
+         msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
+         if len(name) > 512:
+             # posix guarantees that writes to a pipe of less than PIPE_BUF
+             # bytes are atomic, and that PIPE_BUF >= 512
+             raise ValueError('name too long')
+         nbytes = os.write(self._fd, msg)
+         assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format(
+             nbytes, len(msg))
+
+
+ _resource_tracker = ResourceTracker()
+ ensure_running = _resource_tracker.ensure_running
+ register = _resource_tracker.register
+ unregister = _resource_tracker.unregister
+ getfd = _resource_tracker.getfd
+
+ def main(fd):
+     '''Run resource tracker.'''
+     # protect the process from ^C and "killall python" etc
+     signal.signal(signal.SIGINT, signal.SIG_IGN)
+     signal.signal(signal.SIGTERM, signal.SIG_IGN)
+     if _HAVE_SIGMASK:
+         signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
+
+     for f in (sys.stdin, sys.stdout):
+         try:
+             f.close()
+         except Exception:
+             pass
+
+     cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()}
+     try:
+         # keep track of registered/unregistered resources
+         with open(fd, 'rb') as f:
+             for line in f:
+                 try:
+                     cmd, name, rtype = line.strip().decode('ascii').split(':')
+                     cleanup_func = _CLEANUP_FUNCS.get(rtype, None)
+                     if cleanup_func is None:
+                         raise ValueError(
+                             f'Cannot register {name} for automatic cleanup: '
+                             f'unknown resource type {rtype}')
+
+                     if cmd == 'REGISTER':
+                         cache[rtype].add(name)
+                     elif cmd == 'UNREGISTER':
+                         cache[rtype].remove(name)
+                     elif cmd == 'PROBE':
+                         pass
+                     else:
+                         raise RuntimeError('unrecognized command %r' % cmd)
+                 except Exception:
+                     try:
+                         sys.excepthook(*sys.exc_info())
+                     except:
+                         pass
+     finally:
+         # all processes have terminated; cleanup any remaining resources
+         for rtype, rtype_cache in cache.items():
+             if rtype_cache:
+                 try:
+                     warnings.warn('resource_tracker: There appear to be %d '
+                                   'leaked %s objects to clean up at shutdown' %
+                                   (len(rtype_cache), rtype))
+                 except Exception:
+                     pass
+             for name in rtype_cache:
+                 # For some reason the process which created and registered this
+                 # resource has failed to unregister it. Presumably it has
+                 # died.  We therefore unlink it.
+                 try:
+                     try:
+                         _CLEANUP_FUNCS[rtype](name)
+                     except Exception as e:
+                         warnings.warn('resource_tracker: %r: %s' % (name, e))
+                 finally:
+                     pass
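
The tracker speaks the line protocol visible in _send() and main() above:
'CMD:name:rtype\n' written to a pipe that every process holds open. A minimal
POSIX-only sketch (illustrative, not part of the diff; 'noop' is the built-in
do-nothing cleanup type):

    from multiprocess import resource_tracker

    resource_tracker.ensure_running()                 # spawns the tracker if needed
    resource_tracker.register('demo-name', 'noop')    # writes REGISTER:demo-name:noop
    resource_tracker.unregister('demo-name', 'noop')  # writes UNREGISTER:demo-name:noop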
lib/python3.10/site-packages/multiprocess/shared_memory.py ADDED
@@ -0,0 +1,535 @@
+ """Provides shared memory for direct access across processes.
+
+ The API of this package is currently provisional. Refer to the
+ documentation for details.
+ """
+
+
+ __all__ = [ 'SharedMemory', 'ShareableList' ]
+
+
+ from functools import partial
+ import mmap
+ import os
+ import errno
+ import struct
+ import secrets
+ import types
+
+ if os.name == "nt":
+     import _winapi
+     _USE_POSIX = False
+ else:
+     import _posixshmem
+     _USE_POSIX = True
+
+
+ _O_CREX = os.O_CREAT | os.O_EXCL
+
+ # FreeBSD (and perhaps other BSDs) limit names to 14 characters.
+ _SHM_SAFE_NAME_LENGTH = 14
+
+ # Shared memory block name prefix
+ if _USE_POSIX:
+     _SHM_NAME_PREFIX = '/psm_'
+ else:
+     _SHM_NAME_PREFIX = 'wnsm_'
+
+
+ def _make_filename():
+     "Create a random filename for the shared memory object."
+     # number of random bytes to use for name
+     nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2
+     assert nbytes >= 2, '_SHM_NAME_PREFIX too long'
+     name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes)
+     assert len(name) <= _SHM_SAFE_NAME_LENGTH
+     return name
+
+
+ class SharedMemory:
+     """Creates a new shared memory block or attaches to an existing
+     shared memory block.
+
+     Every shared memory block is assigned a unique name.  This enables
+     one process to create a shared memory block with a particular name
+     so that a different process can attach to that same shared memory
+     block using that same name.
+
+     As a resource for sharing data across processes, shared memory blocks
+     may outlive the original process that created them.  When one process
+     no longer needs access to a shared memory block that might still be
+     needed by other processes, the close() method should be called.
+     When a shared memory block is no longer needed by any process, the
+     unlink() method should be called to ensure proper cleanup."""
+
+     # Defaults; enables close() and unlink() to run without errors.
+     _name = None
+     _fd = -1
+     _mmap = None
+     _buf = None
+     _flags = os.O_RDWR
+     _mode = 0o600
+     _prepend_leading_slash = True if _USE_POSIX else False
+
+     def __init__(self, name=None, create=False, size=0):
+         if not size >= 0:
+             raise ValueError("'size' must be a positive integer")
+         if create:
+             self._flags = _O_CREX | os.O_RDWR
+             if size == 0:
+                 raise ValueError("'size' must be a positive number different from zero")
+         if name is None and not self._flags & os.O_EXCL:
+             raise ValueError("'name' can only be None if create=True")
+
+         if _USE_POSIX:
+
+             # POSIX Shared Memory
+
+             if name is None:
+                 while True:
+                     name = _make_filename()
+                     try:
+                         self._fd = _posixshmem.shm_open(
+                             name,
+                             self._flags,
+                             mode=self._mode
+                         )
+                     except FileExistsError:
+                         continue
+                     self._name = name
+                     break
+             else:
+                 name = "/" + name if self._prepend_leading_slash else name
+                 self._fd = _posixshmem.shm_open(
+                     name,
+                     self._flags,
+                     mode=self._mode
+                 )
+                 self._name = name
+             try:
+                 if create and size:
+                     os.ftruncate(self._fd, size)
+                 stats = os.fstat(self._fd)
+                 size = stats.st_size
+                 self._mmap = mmap.mmap(self._fd, size)
+             except OSError:
+                 self.unlink()
+                 raise
+
+             from .resource_tracker import register
+             register(self._name, "shared_memory")
+
+         else:
+
+             # Windows Named Shared Memory
+
+             if create:
+                 while True:
+                     temp_name = _make_filename() if name is None else name
+                     # Create and reserve shared memory block with this name
+                     # until it can be attached to by mmap.
+                     h_map = _winapi.CreateFileMapping(
+                         _winapi.INVALID_HANDLE_VALUE,
+                         _winapi.NULL,
+                         _winapi.PAGE_READWRITE,
+                         (size >> 32) & 0xFFFFFFFF,
+                         size & 0xFFFFFFFF,
+                         temp_name
+                     )
+                     try:
+                         last_error_code = _winapi.GetLastError()
+                         if last_error_code == _winapi.ERROR_ALREADY_EXISTS:
+                             if name is not None:
+                                 raise FileExistsError(
+                                     errno.EEXIST,
+                                     os.strerror(errno.EEXIST),
+                                     name,
+                                     _winapi.ERROR_ALREADY_EXISTS
+                                 )
+                             else:
+                                 continue
+                         self._mmap = mmap.mmap(-1, size, tagname=temp_name)
+                     finally:
+                         _winapi.CloseHandle(h_map)
+                     self._name = temp_name
+                     break
+
+             else:
+                 self._name = name
+                 # Dynamically determine the existing named shared memory
+                 # block's size which is likely a multiple of mmap.PAGESIZE.
+                 h_map = _winapi.OpenFileMapping(
+                     _winapi.FILE_MAP_READ,
+                     False,
+                     name
+                 )
+                 try:
+                     p_buf = _winapi.MapViewOfFile(
+                         h_map,
+                         _winapi.FILE_MAP_READ,
+                         0,
+                         0,
+                         0
+                     )
+                 finally:
+                     _winapi.CloseHandle(h_map)
+                 size = _winapi.VirtualQuerySize(p_buf)
+                 self._mmap = mmap.mmap(-1, size, tagname=name)
+
+         self._size = size
+         self._buf = memoryview(self._mmap)
+
+     def __del__(self):
+         try:
+             self.close()
+         except OSError:
+             pass
+
+     def __reduce__(self):
+         return (
+             self.__class__,
+             (
+                 self.name,
+                 False,
+                 self.size,
+             ),
+         )
+
+     def __repr__(self):
+         return f'{self.__class__.__name__}({self.name!r}, size={self.size})'
+
+     @property
+     def buf(self):
+         "A memoryview of contents of the shared memory block."
+         return self._buf
+
+     @property
+     def name(self):
+         "Unique name that identifies the shared memory block."
+         reported_name = self._name
+         if _USE_POSIX and self._prepend_leading_slash:
+             if self._name.startswith("/"):
+                 reported_name = self._name[1:]
+         return reported_name
+
+     @property
+     def size(self):
+         "Size in bytes."
+         return self._size
+
+     def close(self):
+         """Closes access to the shared memory from this instance but does
+         not destroy the shared memory block."""
+         if self._buf is not None:
+             self._buf.release()
+             self._buf = None
+         if self._mmap is not None:
+             self._mmap.close()
+             self._mmap = None
+         if _USE_POSIX and self._fd >= 0:
+             os.close(self._fd)
+             self._fd = -1
+
+     def unlink(self):
+         """Requests that the underlying shared memory block be destroyed.
+
+         In order to ensure proper cleanup of resources, unlink should be
+         called once (and only once) across all processes which have access
+         to the shared memory block."""
+         if _USE_POSIX and self._name:
+             from .resource_tracker import unregister
+             _posixshmem.shm_unlink(self._name)
+             unregister(self._name, "shared_memory")
+
+
+ _encoding = "utf8"
+
+ class ShareableList:
+     """Pattern for a mutable list-like object shareable via a shared
+     memory block.  It differs from the built-in list type in that these
+     lists can not change their overall length (i.e. no append, insert,
+     etc.)
+
+     Because values are packed into a memoryview as bytes, the struct
+     packing format for any storable value must require no more than 8
+     characters to describe its format."""
+
+     # The shared memory area is organized as follows:
+     # - 8 bytes: number of items (N) as a 64-bit integer
+     # - (N + 1) * 8 bytes: offsets of each element from the start of the
+     #   data area
+     # - K bytes: the data area storing item values (with encoding and size
+     #   depending on their respective types)
+     # - N * 8 bytes: `struct` format string for each element
+     # - N bytes: index into _back_transforms_mapping for each element
+     #   (for reconstructing the corresponding Python value)
+     _types_mapping = {
+         int: "q",
+         float: "d",
+         bool: "xxxxxxx?",
+         str: "%ds",
+         bytes: "%ds",
+         None.__class__: "xxxxxx?x",
+     }
+     _alignment = 8
+     _back_transforms_mapping = {
+         0: lambda value: value,                                    # int, float, bool
+         1: lambda value: value.rstrip(b'\x00').decode(_encoding),  # str
+         2: lambda value: value.rstrip(b'\x00'),                    # bytes
+         3: lambda _value: None,                                    # None
+     }
+
+     @staticmethod
+     def _extract_recreation_code(value):
+         """Used in concert with _back_transforms_mapping to convert values
+         into the appropriate Python objects when retrieving them from
+         the list as well as when storing them."""
+         if not isinstance(value, (str, bytes, None.__class__)):
+             return 0
+         elif isinstance(value, str):
+             return 1
+         elif isinstance(value, bytes):
+             return 2
+         else:
+             return 3  # NoneType
+
+     def __init__(self, sequence=None, *, name=None):
+         if name is None or sequence is not None:
+             sequence = sequence or ()
+             _formats = [
+                 self._types_mapping[type(item)]
+                     if not isinstance(item, (str, bytes))
+                     else self._types_mapping[type(item)] % (
+                         self._alignment * (len(item) // self._alignment + 1),
+                     )
+                 for item in sequence
+             ]
+             self._list_len = len(_formats)
+             assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len
+             offset = 0
+             # The offsets of each list element into the shared memory's
+             # data area (0 meaning the start of the data area, not the start
+             # of the shared memory area).
+             self._allocated_offsets = [0]
+             for fmt in _formats:
+                 offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1])
+                 self._allocated_offsets.append(offset)
+             _recreation_codes = [
+                 self._extract_recreation_code(item) for item in sequence
+             ]
+             requested_size = struct.calcsize(
+                 "q" + self._format_size_metainfo +
+                 "".join(_formats) +
+                 self._format_packing_metainfo +
+                 self._format_back_transform_codes
+             )
+
+             self.shm = SharedMemory(name, create=True, size=requested_size)
+         else:
+             self.shm = SharedMemory(name)
+
+         if sequence is not None:
+             _enc = _encoding
+             struct.pack_into(
+                 "q" + self._format_size_metainfo,
+                 self.shm.buf,
+                 0,
+                 self._list_len,
+                 *(self._allocated_offsets)
+             )
+             struct.pack_into(
+                 "".join(_formats),
+                 self.shm.buf,
+                 self._offset_data_start,
+                 *(v.encode(_enc) if isinstance(v, str) else v for v in sequence)
+             )
+             struct.pack_into(
+                 self._format_packing_metainfo,
+                 self.shm.buf,
+                 self._offset_packing_formats,
+                 *(v.encode(_enc) for v in _formats)
+             )
+             struct.pack_into(
+                 self._format_back_transform_codes,
+                 self.shm.buf,
+                 self._offset_back_transform_codes,
+                 *(_recreation_codes)
+             )
+
+         else:
+             self._list_len = len(self)  # Obtains size from offset 0 in buffer.
+             self._allocated_offsets = list(
+                 struct.unpack_from(
+                     self._format_size_metainfo,
+                     self.shm.buf,
+                     1 * 8
+                 )
+             )
+
+     def _get_packing_format(self, position):
+         "Gets the packing format for a single value stored in the list."
+         position = position if position >= 0 else position + self._list_len
+         if (position >= self._list_len) or (self._list_len < 0):
+             raise IndexError("Requested position out of range.")
+
+         v = struct.unpack_from(
+             "8s",
+             self.shm.buf,
+             self._offset_packing_formats + position * 8
+         )[0]
+         fmt = v.rstrip(b'\x00')
+         fmt_as_str = fmt.decode(_encoding)
+
+         return fmt_as_str
+
+     def _get_back_transform(self, position):
+         "Gets the back transformation function for a single value."
+
+         if (position >= self._list_len) or (self._list_len < 0):
+             raise IndexError("Requested position out of range.")
+
+         transform_code = struct.unpack_from(
+             "b",
+             self.shm.buf,
+             self._offset_back_transform_codes + position
+         )[0]
+         transform_function = self._back_transforms_mapping[transform_code]
+
+         return transform_function
+
+     def _set_packing_format_and_transform(self, position, fmt_as_str, value):
+         """Sets the packing format and back transformation code for a
+         single value in the list at the specified position."""
+
+         if (position >= self._list_len) or (self._list_len < 0):
+             raise IndexError("Requested position out of range.")
+
+         struct.pack_into(
+             "8s",
+             self.shm.buf,
+             self._offset_packing_formats + position * 8,
+             fmt_as_str.encode(_encoding)
+         )
+
+         transform_code = self._extract_recreation_code(value)
+         struct.pack_into(
+             "b",
+             self.shm.buf,
+             self._offset_back_transform_codes + position,
+             transform_code
+         )
+
+     def __getitem__(self, position):
+         position = position if position >= 0 else position + self._list_len
+         try:
+             offset = self._offset_data_start + self._allocated_offsets[position]
+             (v,) = struct.unpack_from(
+                 self._get_packing_format(position),
+                 self.shm.buf,
+                 offset
+             )
+         except IndexError:
+             raise IndexError("index out of range")
+
+         back_transform = self._get_back_transform(position)
+         v = back_transform(v)
+
+         return v
+
+     def __setitem__(self, position, value):
+         position = position if position >= 0 else position + self._list_len
+         try:
+             item_offset = self._allocated_offsets[position]
+             offset = self._offset_data_start + item_offset
+             current_format = self._get_packing_format(position)
+         except IndexError:
+             raise IndexError("assignment index out of range")
+
+         if not isinstance(value, (str, bytes)):
+             new_format = self._types_mapping[type(value)]
+             encoded_value = value
+         else:
+             allocated_length = self._allocated_offsets[position + 1] - item_offset
+
+             encoded_value = (value.encode(_encoding)
+                              if isinstance(value, str) else value)
+             if len(encoded_value) > allocated_length:
+                 raise ValueError("bytes/str item exceeds available storage")
+             if current_format[-1] == "s":
+                 new_format = current_format
+             else:
+                 new_format = self._types_mapping[str] % (
+                     allocated_length,
+                 )
+
+         self._set_packing_format_and_transform(
+             position,
+             new_format,
+             value
+         )
+         struct.pack_into(new_format, self.shm.buf, offset, encoded_value)
+
+     def __reduce__(self):
+         return partial(self.__class__, name=self.shm.name), ()
+
+     def __len__(self):
+         return struct.unpack_from("q", self.shm.buf, 0)[0]
+
+     def __repr__(self):
+         return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})'
+
+     @property
+     def format(self):
+         "The struct packing format used by all currently stored items."
+         return "".join(
+             self._get_packing_format(i) for i in range(self._list_len)
+         )
+
+     @property
+     def _format_size_metainfo(self):
+         "The struct packing format used for the items' storage offsets."
+         return "q" * (self._list_len + 1)
+
+     @property
+     def _format_packing_metainfo(self):
+         "The struct packing format used for the items' packing formats."
+         return "8s" * self._list_len
+
+     @property
+     def _format_back_transform_codes(self):
+         "The struct packing format used for the items' back transforms."
+         return "b" * self._list_len
+
+     @property
+     def _offset_data_start(self):
+         # - 8 bytes for the list length
+         # - (N + 1) * 8 bytes for the element offsets
+         return (self._list_len + 2) * 8
+
+     @property
+     def _offset_packing_formats(self):
+         return self._offset_data_start + self._allocated_offsets[-1]
+
+     @property
+     def _offset_back_transform_codes(self):
+         return self._offset_packing_formats + self._list_len * 8
+
+     def count(self, value):
+         "L.count(value) -> integer -- return number of occurrences of value."
+
+         return sum(value == entry for entry in self)
+
+     def index(self, value):
+         """L.index(value) -> integer -- return first index of value.
+         Raises ValueError if the value is not present."""
+
+         for position, entry in enumerate(self):
+             if value == entry:
+                 return position
+         else:
+             raise ValueError(f"{value!r} not in this container")
+
+     __class_getitem__ = classmethod(types.GenericAlias)
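
A short usage sketch of the two classes above (illustrative, not part of the
diff); on POSIX each block is also registered with the resource tracker:

    from multiprocess import shared_memory

    shm = shared_memory.SharedMemory(create=True, size=16)
    shm.buf[:5] = b'hello'                            # write via the memoryview
    peer = shared_memory.SharedMemory(name=shm.name)  # attach by name
    print(bytes(peer.buf[:5]))                        # b'hello'
    peer.close()
    shm.close()
    shm.unlink()          # destroy the block, exactly once across all processes

    sl = shared_memory.ShareableList([1, 2.0, 'three'])
    sl[2] = 'abc'         # fits in the fixed 8-byte slot allocated for 'three'
    print(list(sl))       # [1, 2.0, 'abc']
    sl.shm.close(); sl.shm.unlink()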
lib/python3.10/site-packages/multiprocess/spawn.py ADDED
@@ -0,0 +1,297 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+#
+# Code used to start processes when using the spawn or forkserver
+# start methods.
+#
+# multiprocessing/spawn.py
+#
+# Copyright (c) 2006-2008, R Oudkerk
+# Licensed to PSF under a Contributor Agreement.
+#
+
+import os
+import sys
+import runpy
+import types
+
+from . import get_start_method, set_start_method
+from . import process
+from .context import reduction
+from . import util
+
+__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable',
+           'get_preparation_data', 'get_command_line', 'import_main_path']
+
+#
+# _python_exe is the assumed path to the python executable.
+# People embedding Python want to modify it.
+#
+
+if sys.platform != 'win32':
+    WINEXE = False
+    WINSERVICE = False
+else:
+    WINEXE = getattr(sys, 'frozen', False)
+    WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
+
+if WINSERVICE:
+    _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
+else:
+    _python_exe = sys.executable
+
+def set_executable(exe):
+    global _python_exe
+    _python_exe = exe
+
+def get_executable():
+    return _python_exe
+
+#
+#
+#
+
+def is_forking(argv):
+    '''
+    Return whether commandline indicates we are forking
+    '''
+    if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
+        return True
+    else:
+        return False
+
+
+def freeze_support():
+    '''
+    Run code for process object if this is not the main process
+    '''
+    if is_forking(sys.argv):
+        kwds = {}
+        for arg in sys.argv[2:]:
+            name, value = arg.split('=')
+            if value == 'None':
+                kwds[name] = None
+            else:
+                kwds[name] = int(value)
+        spawn_main(**kwds)
+        sys.exit()
+
+
+def get_command_line(**kwds):
+    '''
+    Returns prefix of command line used for spawning a child process
+    '''
+    if getattr(sys, 'frozen', False):
+        return ([sys.executable, '--multiprocessing-fork'] +
+                ['%s=%r' % item for item in kwds.items()])
+    else:
+        prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)'
+        prog %= ', '.join('%s=%r' % item for item in kwds.items())
+        opts = util._args_from_interpreter_flags()
+        return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork']
+
+
+def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None):
+    '''
+    Run code specified by data received over pipe
+    '''
+    assert is_forking(sys.argv), "Not forking"
+    if sys.platform == 'win32':
+        import msvcrt
+        import _winapi
+
+        if parent_pid is not None:
+            source_process = _winapi.OpenProcess(
+                _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE,
+                False, parent_pid)
+        else:
+            source_process = None
+        new_handle = reduction.duplicate(pipe_handle,
+                                         source_process=source_process)
+        fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
+        parent_sentinel = source_process
+    else:
+        from . import resource_tracker
+        resource_tracker._resource_tracker._fd = tracker_fd
+        fd = pipe_handle
+        parent_sentinel = os.dup(pipe_handle)
+    exitcode = _main(fd, parent_sentinel)
+    sys.exit(exitcode)
+
+
+def _main(fd, parent_sentinel):
+    with os.fdopen(fd, 'rb', closefd=True) as from_parent:
+        process.current_process()._inheriting = True
+        try:
+            preparation_data = reduction.pickle.load(from_parent)
+            prepare(preparation_data)
+            self = reduction.pickle.load(from_parent)
+        finally:
+            del process.current_process()._inheriting
+    return self._bootstrap(parent_sentinel)
+
+
+def _check_not_importing_main():
+    if getattr(process.current_process(), '_inheriting', False):
+        raise RuntimeError('''
+        An attempt has been made to start a new process before the
+        current process has finished its bootstrapping phase.
+
+        This probably means that you are not using fork to start your
+        child processes and you have forgotten to use the proper idiom
+        in the main module:
+
+            if __name__ == '__main__':
+                freeze_support()
+                ...
+
+        The "freeze_support()" line can be omitted if the program
+        is not going to be frozen to produce an executable.''')
+
+
+def get_preparation_data(name):
+    '''
+    Return info about parent needed by child to unpickle process object
+    '''
+    _check_not_importing_main()
+    d = dict(
+        log_to_stderr=util._log_to_stderr,
+        authkey=process.current_process().authkey,
+        )
+
+    if util._logger is not None:
+        d['log_level'] = util._logger.getEffectiveLevel()
+
+    sys_path = sys.path.copy()
+    try:
+        i = sys_path.index('')
+    except ValueError:
+        pass
+    else:
+        sys_path[i] = process.ORIGINAL_DIR
+
+    d.update(
+        name=name,
+        sys_path=sys_path,
+        sys_argv=sys.argv,
+        orig_dir=process.ORIGINAL_DIR,
+        dir=os.getcwd(),
+        start_method=get_start_method(),
+        )
+
+    # Figure out whether to initialise main in the subprocess as a module
+    # or through direct execution (or to leave it alone entirely)
+    main_module = sys.modules['__main__']
+    main_mod_name = getattr(main_module.__spec__, "name", None)
+    if main_mod_name is not None:
+        d['init_main_from_name'] = main_mod_name
+    elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
+        main_path = getattr(main_module, '__file__', None)
+        if main_path is not None:
+            if (not os.path.isabs(main_path) and
+                    process.ORIGINAL_DIR is not None):
+                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
+            d['init_main_from_path'] = os.path.normpath(main_path)
+
+    return d
+
+#
+# Prepare current process
+#
+
+old_main_modules = []
+
+def prepare(data):
+    '''
+    Try to get current process ready to unpickle process object
+    '''
+    if 'name' in data:
+        process.current_process().name = data['name']
+
+    if 'authkey' in data:
+        process.current_process().authkey = data['authkey']
+
+    if 'log_to_stderr' in data and data['log_to_stderr']:
+        util.log_to_stderr()
+
+    if 'log_level' in data:
+        util.get_logger().setLevel(data['log_level'])
+
+    if 'sys_path' in data:
+        sys.path = data['sys_path']
+
+    if 'sys_argv' in data:
+        sys.argv = data['sys_argv']
+
+    if 'dir' in data:
+        os.chdir(data['dir'])
+
+    if 'orig_dir' in data:
+        process.ORIGINAL_DIR = data['orig_dir']
+
+    if 'start_method' in data:
+        set_start_method(data['start_method'], force=True)
+
+    if 'init_main_from_name' in data:
+        _fixup_main_from_name(data['init_main_from_name'])
+    elif 'init_main_from_path' in data:
+        _fixup_main_from_path(data['init_main_from_path'])
+
+# Multiprocessing module helpers to fix up the main module in
+# spawned subprocesses
+def _fixup_main_from_name(mod_name):
+    # __main__.py files for packages, directories, zip archives, etc, run
+    # their "main only" code unconditionally, so we don't even try to
+    # populate anything in __main__, nor do we make any changes to
+    # __main__ attributes
+    current_main = sys.modules['__main__']
+    if mod_name == "__main__" or mod_name.endswith(".__main__"):
+        return
+
+    # If this process was forked, __main__ may already be populated
+    if getattr(current_main.__spec__, "name", None) == mod_name:
+        return
+
+    # Otherwise, __main__ may contain some non-main code where we need to
+    # support unpickling it properly. We rerun it as __mp_main__ and make
+    # the normal __main__ an alias to that
+    old_main_modules.append(current_main)
+    main_module = types.ModuleType("__mp_main__")
+    main_content = runpy.run_module(mod_name,
+                                    run_name="__mp_main__",
+                                    alter_sys=True)
+    main_module.__dict__.update(main_content)
+    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
+
+
+def _fixup_main_from_path(main_path):
+    # If this process was forked, __main__ may already be populated
+    current_main = sys.modules['__main__']
+
+    # Unfortunately, the main ipython launch script historically had no
+    # "if __name__ == '__main__'" guard, so we work around that
+    # by treating it like a __main__.py file
+    # See https://github.com/ipython/ipython/issues/4698
+    main_name = os.path.splitext(os.path.basename(main_path))[0]
+    if main_name == 'ipython':
+        return
+
+    # Otherwise, if __file__ already has the setting we expect,
+    # there's nothing more to do
+    if getattr(current_main, '__file__', None) == main_path:
+        return
+
+    # If the parent process has sent a path through rather than a module
+    # name we assume it is an executable script that may contain
+    # non-main code that needs to be executed
+    old_main_modules.append(current_main)
+    main_module = types.ModuleType("__mp_main__")
+    main_content = runpy.run_path(main_path,
+                                  run_name="__mp_main__")
+    main_module.__dict__.update(main_content)
+    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
+
+
+def import_main_path(main_path):
+    '''
+    Set sys.modules['__main__'] to module at main_path
+    '''
+    _fixup_main_from_path(main_path)
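
The RuntimeError raised by _check_not_importing_main() above spells out the required idiom; a minimal sketch of a spawn-safe script (names are illustrative, not part of the diff; assumes the package is importable as `multiprocess`):

    # Hypothetical sketch: the __main__ guard that spawn_main()/prepare() rely on.
    import multiprocess as mp

    def worker(q):
        q.put(mp.current_process().name)

    if __name__ == '__main__':        # child re-imports this module on 'spawn'
        mp.set_start_method('spawn')  # routes startup through the spawn.py above
        q = mp.Queue()
        p = mp.Process(target=worker, args=(q,))
        p.start()
        print(q.get())                # e.g. 'Process-1'
        p.join()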
lib/python3.10/site-packages/multiprocess/tests/__init__.py ADDED
The diff for this file is too large to render. See raw diff
 
lib/python3.10/site-packages/multiprocess/tests/__main__.py ADDED
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2018-2021 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD.  The full license text is available at:
+#  - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE
+
+from __future__ import print_function
+import glob
+import os
+try:
+    import pox
+    python = pox.which_python(version=True, fullpath=False) or 'python'
+except ImportError:
+    python = 'python'
+import subprocess as sp
+from sys import platform
+shell = platform[:3] == 'win'
+
+suite = os.path.dirname(__file__) or os.path.curdir
+tests = glob.glob(suite + os.path.sep + 'test_*.py')
+tests = glob.glob(suite + os.path.sep + '__init__.py') + \
+        [i for i in tests if 'main' not in i]
+
+
+if __name__ == '__main__':
+
+    for test in tests:
+        p = sp.Popen([python, test], shell=shell).wait()
+        if not p:
+            print('.', end='')
+    print('')