nnilayy committed on
Commit d032e24 · verified · 1 Parent(s): e0dc28c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +1 -0
  2. lib/python3.10/site-packages/av/container/input.cpython-310-x86_64-linux-gnu.so +3 -0
  3. lib/python3.10/site-packages/babel/locale-data/ann.dat +0 -0
  4. lib/python3.10/site-packages/babel/locale-data/ar_LY.dat +0 -0
  5. lib/python3.10/site-packages/babel/locale-data/en_JM.dat +0 -0
  6. lib/python3.10/site-packages/babel/locale-data/en_NZ.dat +0 -0
  7. lib/python3.10/site-packages/babel/locale-data/ff_Adlm_NG.dat +0 -0
  8. lib/python3.10/site-packages/babel/locale-data/fr_CH.dat +0 -0
  9. lib/python3.10/site-packages/babel/locale-data/ha_Arab.dat +0 -0
  10. lib/python3.10/site-packages/babel/locale-data/jbo_001.dat +0 -0
  11. lib/python3.10/site-packages/babel/locale-data/ltg.dat +0 -0
  12. lib/python3.10/site-packages/babel/locale-data/nds_NL.dat +0 -0
  13. lib/python3.10/site-packages/babel/locale-data/nmg_CM.dat +0 -0
  14. lib/python3.10/site-packages/babel/locale-data/pis_SB.dat +0 -0
  15. lib/python3.10/site-packages/babel/locale-data/prg_PL.dat +0 -0
  16. lib/python3.10/site-packages/babel/locale-data/sah_RU.dat +0 -0
  17. lib/python3.10/site-packages/babel/locale-data/tn_BW.dat +0 -0
  18. lib/python3.10/site-packages/babel/locale-data/vai_Latn.dat +0 -0
  19. lib/python3.10/site-packages/babel/localtime/__init__.py +43 -0
  20. lib/python3.10/site-packages/babel/localtime/_fallback.py +44 -0
  21. lib/python3.10/site-packages/babel/localtime/_helpers.py +57 -0
  22. lib/python3.10/site-packages/babel/localtime/_unix.py +104 -0
  23. lib/python3.10/site-packages/babel/localtime/_win32.py +98 -0
  24. lib/python3.10/site-packages/babel/messages/__init__.py +21 -0
  25. lib/python3.10/site-packages/babel/messages/_compat.py +34 -0
  26. lib/python3.10/site-packages/babel/messages/catalog.py +1000 -0
  27. lib/python3.10/site-packages/babel/messages/checkers.py +168 -0
  28. lib/python3.10/site-packages/babel/messages/extract.py +852 -0
  29. lib/python3.10/site-packages/babel/messages/frontend.py +1202 -0
  30. lib/python3.10/site-packages/babel/messages/jslexer.py +204 -0
  31. lib/python3.10/site-packages/babel/messages/mofile.py +210 -0
  32. lib/python3.10/site-packages/babel/messages/plurals.py +266 -0
  33. lib/python3.10/site-packages/babel/messages/pofile.py +744 -0
  34. lib/python3.10/site-packages/babel/messages/setuptools_frontend.py +108 -0
  35. lib/python3.10/site-packages/numba/__init__.py +253 -0
  36. lib/python3.10/site-packages/numba/__main__.py +6 -0
  37. lib/python3.10/site-packages/numba/_arraystruct.h +21 -0
  38. lib/python3.10/site-packages/numba/_devicearray.cpython-310-x86_64-linux-gnu.so +0 -0
  39. lib/python3.10/site-packages/numba/_devicearray.h +25 -0
  40. lib/python3.10/site-packages/numba/_dynfunc.c +534 -0
  41. lib/python3.10/site-packages/numba/_dynfunc.cpython-310-x86_64-linux-gnu.so +0 -0
  42. lib/python3.10/site-packages/numba/_dynfuncmod.c +93 -0
  43. lib/python3.10/site-packages/numba/_hashtable.h +132 -0
  44. lib/python3.10/site-packages/numba/_helperlib.c +1251 -0
  45. lib/python3.10/site-packages/numba/_helpermod.c +277 -0
  46. lib/python3.10/site-packages/numba/_lapack.c +1946 -0
  47. lib/python3.10/site-packages/numba/_numba_common.h +43 -0
  48. lib/python3.10/site-packages/numba/_pymodule.h +35 -0
  49. lib/python3.10/site-packages/numba/_random.c +492 -0
  50. lib/python3.10/site-packages/numba/_typeof.h +16 -0
.gitattributes CHANGED
@@ -85,3 +85,4 @@ lib/python3.10/site-packages/av/error.cpython-310-x86_64-linux-gnu.so filter=lfs
  lib/python3.10/site-packages/av/dictionary.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  lib/python3.10/site-packages/av/container/core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  lib/python3.10/site-packages/av/container/pyio.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ lib/python3.10/site-packages/av/container/input.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
lib/python3.10/site-packages/av/container/input.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d76c55337da2ca9a297456b0de0eacf4cce96faf4447eb4b6ccabab56b077a3d
+ size 864209
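The added .so is stored as a Git LFS pointer: the three fields above record the pointer spec version, the SHA-256 of the real binary, and its size in bytes. As an illustrative sketch (not part of the commit), a pointer can be checked against a locally downloaded blob by recomputing those two fields; the path below is a placeholder:

    import hashlib
    import os

    def verify_lfs_pointer(blob_path, expected_oid, expected_size):
        """Return True if the local blob matches the pointer's oid and size."""
        digest = hashlib.sha256()
        with open(blob_path, "rb") as fh:
            for chunk in iter(lambda: fh.read(1 << 20), b""):
                digest.update(chunk)
        return (digest.hexdigest() == expected_oid
                and os.path.getsize(blob_path) == expected_size)

    # verify_lfs_pointer("input.cpython-310-x86_64-linux-gnu.so", "d76c55337da2ca9a297456b0de0eacf4cce96faf4447eb4b6ccabab56b077a3d", 864209)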
lib/python3.10/site-packages/babel/locale-data/ann.dat ADDED
Binary file (737 Bytes).
 
lib/python3.10/site-packages/babel/locale-data/ar_LY.dat ADDED
Binary file (1.25 kB).
 
lib/python3.10/site-packages/babel/locale-data/en_JM.dat ADDED
Binary file (1.67 kB).
 
lib/python3.10/site-packages/babel/locale-data/en_NZ.dat ADDED
Binary file (2.34 kB).
 
lib/python3.10/site-packages/babel/locale-data/ff_Adlm_NG.dat ADDED
Binary file (658 Bytes).
 
lib/python3.10/site-packages/babel/locale-data/fr_CH.dat ADDED
Binary file (2.88 kB).
 
lib/python3.10/site-packages/babel/locale-data/ha_Arab.dat ADDED
Binary file (2.2 kB).
 
lib/python3.10/site-packages/babel/locale-data/jbo_001.dat ADDED
Binary file (746 Bytes).
 
lib/python3.10/site-packages/babel/locale-data/ltg.dat ADDED
Binary file (2.63 kB).
 
lib/python3.10/site-packages/babel/locale-data/nds_NL.dat ADDED
Binary file (654 Bytes).
 
lib/python3.10/site-packages/babel/locale-data/nmg_CM.dat ADDED
Binary file (636 Bytes).
 
lib/python3.10/site-packages/babel/locale-data/pis_SB.dat ADDED
Binary file (617 Bytes).
 
lib/python3.10/site-packages/babel/locale-data/prg_PL.dat ADDED
Binary file (654 Bytes).
 
lib/python3.10/site-packages/babel/locale-data/sah_RU.dat ADDED
Binary file (654 Bytes).
 
lib/python3.10/site-packages/babel/locale-data/tn_BW.dat ADDED
Binary file (654 Bytes).
 
lib/python3.10/site-packages/babel/locale-data/vai_Latn.dat ADDED
Binary file (14.3 kB).
 
lib/python3.10/site-packages/babel/localtime/__init__.py ADDED
@@ -0,0 +1,43 @@
+ """
+     babel.localtime
+     ~~~~~~~~~~~~~~~
+
+     Babel specific fork of tzlocal to determine the local timezone
+     of the system.
+
+     :copyright: (c) 2013-2025 by the Babel Team.
+     :license: BSD, see LICENSE for more details.
+ """
+
+ import datetime
+ import sys
+
+ if sys.platform == 'win32':
+     from babel.localtime._win32 import _get_localzone
+ else:
+     from babel.localtime._unix import _get_localzone
+
+
+ # TODO(3.0): the offset constants are not part of the public API
+ # and should be removed
+ from babel.localtime._fallback import (
+     DSTDIFF,  # noqa: F401
+     DSTOFFSET,  # noqa: F401
+     STDOFFSET,  # noqa: F401
+     ZERO,  # noqa: F401
+     _FallbackLocalTimezone,
+ )
+
+
+ def get_localzone() -> datetime.tzinfo:
+     """Returns the current underlying local timezone object.
+     Generally this function does not need to be used, it's a
+     better idea to use the :data:`LOCALTZ` singleton instead.
+     """
+     return _get_localzone()
+
+
+ try:
+     LOCALTZ = get_localzone()
+ except LookupError:
+     LOCALTZ = _FallbackLocalTimezone()
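A minimal usage sketch for the module added above (illustrative, not part of the commit). The public entry points are `get_localzone()` and the `LOCALTZ` singleton; the module itself recommends `LOCALTZ`, which falls back to `_FallbackLocalTimezone` when no timezone configuration can be found:

    import datetime

    from babel.localtime import LOCALTZ, get_localzone

    tz = get_localzone()                  # may raise LookupError on unconfigured systems
    now = datetime.datetime.now(LOCALTZ)  # LOCALTZ never raises; it falls back instead
    print(tz, now.isoformat())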
lib/python3.10/site-packages/babel/localtime/_fallback.py ADDED
@@ -0,0 +1,44 @@
+ """
+     babel.localtime._fallback
+     ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+     Emulated fallback local timezone when all else fails.
+
+     :copyright: (c) 2013-2025 by the Babel Team.
+     :license: BSD, see LICENSE for more details.
+ """
+
+ import datetime
+ import time
+
+ STDOFFSET = datetime.timedelta(seconds=-time.timezone)
+ DSTOFFSET = datetime.timedelta(seconds=-time.altzone) if time.daylight else STDOFFSET
+
+ DSTDIFF = DSTOFFSET - STDOFFSET
+ ZERO = datetime.timedelta(0)
+
+
+ class _FallbackLocalTimezone(datetime.tzinfo):
+
+     def utcoffset(self, dt: datetime.datetime) -> datetime.timedelta:
+         if self._isdst(dt):
+             return DSTOFFSET
+         else:
+             return STDOFFSET
+
+     def dst(self, dt: datetime.datetime) -> datetime.timedelta:
+         if self._isdst(dt):
+             return DSTDIFF
+         else:
+             return ZERO
+
+     def tzname(self, dt: datetime.datetime) -> str:
+         return time.tzname[self._isdst(dt)]
+
+     def _isdst(self, dt: datetime.datetime) -> bool:
+         tt = (dt.year, dt.month, dt.day,
+               dt.hour, dt.minute, dt.second,
+               dt.weekday(), 0, -1)
+         stamp = time.mktime(tt)
+         tt = time.localtime(stamp)
+         return tt.tm_isdst > 0
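The fallback timezone above derives its offsets from the standard `time` module rather than from zoneinfo data. A small illustrative check (not part of the commit):

    import datetime

    from babel.localtime._fallback import _FallbackLocalTimezone

    tz = _FallbackLocalTimezone()
    dt = datetime.datetime(2024, 7, 1, 12, 0)
    print(tz.utcoffset(dt), tz.dst(dt), tz.tzname(dt))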
lib/python3.10/site-packages/babel/localtime/_helpers.py ADDED
@@ -0,0 +1,57 @@
+ try:
+     import pytz
+ except ModuleNotFoundError:
+     pytz = None
+
+ try:
+     import zoneinfo
+ except ModuleNotFoundError:
+     zoneinfo = None
+
+
+ def _get_tzinfo(tzenv: str):
+     """Get the tzinfo from `zoneinfo` or `pytz`
+
+     :param tzenv: timezone in the form of Continent/City
+     :return: tzinfo object or None if not found
+     """
+     if pytz:
+         try:
+             return pytz.timezone(tzenv)
+         except pytz.UnknownTimeZoneError:
+             pass
+     else:
+         try:
+             return zoneinfo.ZoneInfo(tzenv)
+         except ValueError as ve:
+             # This is somewhat hacky, but since _validate_tzfile_path() doesn't
+             # raise a specific error type, we'll need to check the message to be
+             # one we know to be from that function.
+             # If so, we pretend it meant that the TZ didn't exist, for the benefit
+             # of `babel.localtime` catching the `LookupError` raised by
+             # `_get_tzinfo_or_raise()`.
+             # See https://github.com/python-babel/babel/issues/1092
+             if str(ve).startswith("ZoneInfo keys "):
+                 return None
+         except zoneinfo.ZoneInfoNotFoundError:
+             pass
+
+     return None
+
+
+ def _get_tzinfo_or_raise(tzenv: str):
+     tzinfo = _get_tzinfo(tzenv)
+     if tzinfo is None:
+         raise LookupError(
+             f"Can not find timezone {tzenv}. \n"
+             "Timezone names are generally in the form `Continent/City`.",
+         )
+     return tzinfo
+
+
+ def _get_tzinfo_from_file(tzfilename: str):
+     with open(tzfilename, 'rb') as tzfile:
+         if pytz:
+             return pytz.tzfile.build_tzinfo('local', tzfile)
+         else:
+             return zoneinfo.ZoneInfo.from_file(tzfile)
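An illustrative call into the helpers above (not part of the commit). Whether a `pytz` or `zoneinfo` object is returned depends on which optional backend is importable; unknown zone names yield `None` from `_get_tzinfo` and a `LookupError` from `_get_tzinfo_or_raise`:

    from babel.localtime._helpers import _get_tzinfo, _get_tzinfo_or_raise

    print(_get_tzinfo("Europe/Paris"))   # tzinfo object, or None if the zone is unknown
    print(_get_tzinfo("Not/AZone"))      # None
    _get_tzinfo_or_raise("Not/AZone")    # raises LookupError with a Continent/City hint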
lib/python3.10/site-packages/babel/localtime/_unix.py ADDED
@@ -0,0 +1,104 @@
+ import datetime
+ import os
+ import re
+
+ from babel.localtime._helpers import (
+     _get_tzinfo,
+     _get_tzinfo_from_file,
+     _get_tzinfo_or_raise,
+ )
+
+
+ def _tz_from_env(tzenv: str) -> datetime.tzinfo:
+     if tzenv[0] == ':':
+         tzenv = tzenv[1:]
+
+     # TZ specifies a file
+     if os.path.exists(tzenv):
+         return _get_tzinfo_from_file(tzenv)
+
+     # TZ specifies a zoneinfo zone.
+     return _get_tzinfo_or_raise(tzenv)
+
+
+ def _get_localzone(_root: str = '/') -> datetime.tzinfo:
+     """Tries to find the local timezone configuration.
+     This method prefers finding the timezone name and passing that to
+     zoneinfo or pytz, over passing in the localtime file, as in the later
+     case the zoneinfo name is unknown.
+     The parameter _root makes the function look for files like /etc/localtime
+     beneath the _root directory. This is primarily used by the tests.
+     In normal usage you call the function without parameters.
+     """
+
+     tzenv = os.environ.get('TZ')
+     if tzenv:
+         return _tz_from_env(tzenv)
+
+     # This is actually a pretty reliable way to test for the local time
+     # zone on operating systems like OS X. On OS X especially this is the
+     # only one that actually works.
+     try:
+         link_dst = os.readlink('/etc/localtime')
+     except OSError:
+         pass
+     else:
+         pos = link_dst.find('/zoneinfo/')
+         if pos >= 0:
+             # On occasion, the `/etc/localtime` symlink has a double slash, e.g.
+             # "/usr/share/zoneinfo//UTC", which would make `zoneinfo.ZoneInfo`
+             # complain (no absolute paths allowed), and we'd end up returning
+             # `None` (as a fix for #1092).
+             # Instead, let's just "fix" the double slash symlink by stripping
+             # leading slashes before passing the assumed zone name forward.
+             zone_name = link_dst[pos + 10:].lstrip("/")
+             tzinfo = _get_tzinfo(zone_name)
+             if tzinfo is not None:
+                 return tzinfo
+
+     # Now look for distribution specific configuration files
+     # that contain the timezone name.
+     tzpath = os.path.join(_root, 'etc/timezone')
+     if os.path.exists(tzpath):
+         with open(tzpath, 'rb') as tzfile:
+             data = tzfile.read()
+
+             # Issue #3 in tzlocal was that /etc/timezone was a zoneinfo file.
+             # That's a misconfiguration, but we need to handle it gracefully:
+             if data[:5] != b'TZif2':
+                 etctz = data.strip().decode()
+                 # Get rid of host definitions and comments:
+                 if ' ' in etctz:
+                     etctz, dummy = etctz.split(' ', 1)
+                 if '#' in etctz:
+                     etctz, dummy = etctz.split('#', 1)
+
+                 return _get_tzinfo_or_raise(etctz.replace(' ', '_'))
+
+     # CentOS has a ZONE setting in /etc/sysconfig/clock,
+     # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and
+     # Gentoo has a TIMEZONE setting in /etc/conf.d/clock
+     # We look through these files for a timezone:
+     timezone_re = re.compile(r'\s*(TIME)?ZONE\s*=\s*"(?P<etctz>.+)"')
+
+     for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'):
+         tzpath = os.path.join(_root, filename)
+         if not os.path.exists(tzpath):
+             continue
+         with open(tzpath) as tzfile:
+             for line in tzfile:
+                 match = timezone_re.match(line)
+                 if match is not None:
+                     # We found a timezone
+                     etctz = match.group("etctz")
+                     return _get_tzinfo_or_raise(etctz.replace(' ', '_'))
+
+     # No explicit setting existed. Use localtime
+     for filename in ('etc/localtime', 'usr/local/etc/localtime'):
+         tzpath = os.path.join(_root, filename)
+
+         if not os.path.exists(tzpath):
+             continue
+         return _get_tzinfo_from_file(tzpath)
+
+     raise LookupError('Can not find any timezone configuration')
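A sketch of how the Unix lookup above is typically exercised (illustrative, not part of the commit). The `TZ` environment variable takes precedence, then the `/etc/localtime` symlink, then distribution-specific files; the `_root` parameter exists only for tests:

    import os

    from babel.localtime._unix import _get_localzone

    os.environ["TZ"] = "Europe/Berlin"   # TZ naming a zoneinfo zone
    print(_get_localzone())

    del os.environ["TZ"]                 # fall back to /etc/localtime and friends
    print(_get_localzone())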
lib/python3.10/site-packages/babel/localtime/_win32.py ADDED
@@ -0,0 +1,98 @@
+ from __future__ import annotations
+
+ try:
+     import winreg
+ except ImportError:
+     winreg = None
+
+ import datetime
+ from typing import Any, Dict, cast
+
+ from babel.core import get_global
+ from babel.localtime._helpers import _get_tzinfo_or_raise
+
+ # When building the cldr data on windows this module gets imported.
+ # Because at that point there is no global.dat yet this call will
+ # fail. We want to catch it down in that case then and just assume
+ # the mapping was empty.
+ try:
+     tz_names: dict[str, str] = cast(Dict[str, str], get_global('windows_zone_mapping'))
+ except RuntimeError:
+     tz_names = {}
+
+
+ def valuestodict(key) -> dict[str, Any]:
+     """Convert a registry key's values to a dictionary."""
+     dict = {}
+     size = winreg.QueryInfoKey(key)[1]
+     for i in range(size):
+         data = winreg.EnumValue(key, i)
+         dict[data[0]] = data[1]
+     return dict
+
+
+ def get_localzone_name() -> str:
+     # Windows is special. It has unique time zone names (in several
+     # meanings of the word) available, but unfortunately, they can be
+     # translated to the language of the operating system, so we need to
+     # do a backwards lookup, by going through all time zones and see which
+     # one matches.
+     handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
+
+     TZLOCALKEYNAME = r'SYSTEM\CurrentControlSet\Control\TimeZoneInformation'
+     localtz = winreg.OpenKey(handle, TZLOCALKEYNAME)
+     keyvalues = valuestodict(localtz)
+     localtz.Close()
+     if 'TimeZoneKeyName' in keyvalues:
+         # Windows 7 (and Vista?)
+
+         # For some reason this returns a string with loads of NUL bytes at
+         # least on some systems. I don't know if this is a bug somewhere, I
+         # just work around it.
+         tzkeyname = keyvalues['TimeZoneKeyName'].split('\x00', 1)[0]
+     else:
+         # Windows 2000 or XP
+
+         # This is the localized name:
+         tzwin = keyvalues['StandardName']
+
+         # Open the list of timezones to look up the real name:
+         TZKEYNAME = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones'
+         tzkey = winreg.OpenKey(handle, TZKEYNAME)
+
+         # Now, match this value to Time Zone information
+         tzkeyname = None
+         for i in range(winreg.QueryInfoKey(tzkey)[0]):
+             subkey = winreg.EnumKey(tzkey, i)
+             sub = winreg.OpenKey(tzkey, subkey)
+             data = valuestodict(sub)
+             sub.Close()
+             if data.get('Std', None) == tzwin:
+                 tzkeyname = subkey
+                 break
+
+         tzkey.Close()
+         handle.Close()
+
+     if tzkeyname is None:
+         raise LookupError('Can not find Windows timezone configuration')
+
+     timezone = tz_names.get(tzkeyname)
+     if timezone is None:
+         # Nope, that didn't work. Try adding 'Standard Time',
+         # it seems to work a lot of times:
+         timezone = tz_names.get(f"{tzkeyname} Standard Time")
+
+     # Return what we have.
+     if timezone is None:
+         raise LookupError(f"Can not find timezone {tzkeyname}")
+
+     return timezone
+
+
+ def _get_localzone() -> datetime.tzinfo:
+     if winreg is None:
+         raise LookupError(
+             'Runtime support not available')
+
+     return _get_tzinfo_or_raise(get_localzone_name())
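The Windows implementation above requires `winreg`; on other platforms `_get_localzone()` raises `LookupError` (and `babel.localtime` imports the Unix variant instead). A guarded illustrative call (not part of the commit):

    import sys

    from babel.localtime._win32 import _get_localzone

    if sys.platform == "win32":
        print(_get_localzone())
    else:
        print("winreg unavailable; babel.localtime uses the Unix implementation here")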
lib/python3.10/site-packages/babel/messages/__init__.py ADDED
@@ -0,0 +1,21 @@
+ """
+     babel.messages
+     ~~~~~~~~~~~~~~
+
+     Support for ``gettext`` message catalogs.
+
+     :copyright: (c) 2013-2025 by the Babel Team.
+     :license: BSD, see LICENSE for more details.
+ """
+
+ from babel.messages.catalog import (
+     Catalog,
+     Message,
+     TranslationError,
+ )
+
+ __all__ = [
+     "Catalog",
+     "Message",
+     "TranslationError",
+ ]
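The package re-exports `Catalog`, `Message`, and `TranslationError`. A minimal illustrative round trip (not part of the commit), based on the `catalog.py` API added later in this commit:

    from babel.messages import Catalog, Message

    catalog = Catalog(locale="de_DE", project="Foobar", version="1.0")
    catalog.add("green", locations=[("main.py", 99)])
    assert isinstance(catalog["green"], Message)
    print(catalog.plural_forms)   # 'nplurals=2; plural=(n != 1);'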
lib/python3.10/site-packages/babel/messages/_compat.py ADDED
@@ -0,0 +1,34 @@
+ import sys
+ from functools import partial
+
+
+ def find_entrypoints(group_name: str):
+     """
+     Find entrypoints of a given group using either `importlib.metadata` or the
+     older `pkg_resources` mechanism.
+
+     Yields tuples of the entrypoint name and a callable function that will
+     load the actual entrypoint.
+     """
+     if sys.version_info >= (3, 10):
+         # "Changed in version 3.10: importlib.metadata is no longer provisional."
+         try:
+             from importlib.metadata import entry_points
+         except ImportError:
+             pass
+         else:
+             eps = entry_points(group=group_name)
+             # Only do this if this implementation of `importlib.metadata` is
+             # modern enough to not return a dict.
+             if not isinstance(eps, dict):
+                 for entry_point in eps:
+                     yield (entry_point.name, entry_point.load)
+                 return
+
+     try:
+         from pkg_resources import working_set
+     except ImportError:
+         pass
+     else:
+         for entry_point in working_set.iter_entry_points(group_name):
+             yield (entry_point.name, partial(entry_point.load, require=True))
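An illustrative call (not part of the commit); the entry-point group name below is a placeholder, since the helper works for any group:

    from babel.messages._compat import find_entrypoints

    for name, loader in find_entrypoints("example.plugins"):  # placeholder group name
        print(name, loader())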
lib/python3.10/site-packages/babel/messages/catalog.py ADDED
@@ -0,0 +1,1000 @@
1
+ """
2
+ babel.messages.catalog
3
+ ~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Data structures for message catalogs.
6
+
7
+ :copyright: (c) 2013-2025 by the Babel Team.
8
+ :license: BSD, see LICENSE for more details.
9
+ """
10
+ from __future__ import annotations
11
+
12
+ import datetime
13
+ import re
14
+ from collections.abc import Iterable, Iterator
15
+ from copy import copy
16
+ from difflib import SequenceMatcher
17
+ from email import message_from_string
18
+ from heapq import nlargest
19
+ from string import Formatter
20
+ from typing import TYPE_CHECKING
21
+
22
+ from babel import __version__ as VERSION
23
+ from babel.core import Locale, UnknownLocaleError
24
+ from babel.dates import format_datetime
25
+ from babel.messages.plurals import get_plural
26
+ from babel.util import LOCALTZ, FixedOffsetTimezone, _cmp, distinct
27
+
28
+ if TYPE_CHECKING:
29
+ from typing_extensions import TypeAlias
30
+
31
+ _MessageID: TypeAlias = str | tuple[str, ...] | list[str]
32
+
33
+ __all__ = [
34
+ 'DEFAULT_HEADER',
35
+ 'PYTHON_FORMAT',
36
+ 'Catalog',
37
+ 'Message',
38
+ 'TranslationError',
39
+ ]
40
+
41
+
42
+ def get_close_matches(word, possibilities, n=3, cutoff=0.6):
43
+ """A modified version of ``difflib.get_close_matches``.
44
+
45
+ It just passes ``autojunk=False`` to the ``SequenceMatcher``, to work
46
+ around https://github.com/python/cpython/issues/90825.
47
+ """
48
+ if not n > 0: # pragma: no cover
49
+ raise ValueError(f"n must be > 0: {n!r}")
50
+ if not 0.0 <= cutoff <= 1.0: # pragma: no cover
51
+ raise ValueError(f"cutoff must be in [0.0, 1.0]: {cutoff!r}")
52
+ result = []
53
+ s = SequenceMatcher(autojunk=False) # only line changed from difflib.py
54
+ s.set_seq2(word)
55
+ for x in possibilities:
56
+ s.set_seq1(x)
57
+ if s.real_quick_ratio() >= cutoff and \
58
+ s.quick_ratio() >= cutoff and \
59
+ s.ratio() >= cutoff:
60
+ result.append((s.ratio(), x))
61
+
62
+ # Move the best scorers to head of list
63
+ result = nlargest(n, result)
64
+ # Strip scores for the best n matches
65
+ return [x for score, x in result]
66
+
67
+
68
+ PYTHON_FORMAT = re.compile(r'''
69
+ \%
70
+ (?:\(([\w]*)\))?
71
+ (
72
+ [-#0\ +]?(?:\*|[\d]+)?
73
+ (?:\.(?:\*|[\d]+))?
74
+ [hlL]?
75
+ )
76
+ ([diouxXeEfFgGcrs%])
77
+ ''', re.VERBOSE)
78
+
79
+
80
+ def _has_python_brace_format(string: str) -> bool:
81
+ if "{" not in string:
82
+ return False
83
+ fmt = Formatter()
84
+ try:
85
+ # `fmt.parse` returns 3-or-4-tuples of the form
86
+ # `(literal_text, field_name, format_spec, conversion)`;
87
+ # if `field_name` is set, this smells like brace format
88
+ field_name_seen = False
89
+ for t in fmt.parse(string):
90
+ if t[1] is not None:
91
+ field_name_seen = True
92
+ # We cannot break here, as we need to consume the whole string
93
+ # to ensure that it is a valid format string.
94
+ except ValueError:
95
+ return False
96
+ return field_name_seen
97
+
98
+
99
+ def _parse_datetime_header(value: str) -> datetime.datetime:
100
+ match = re.match(r'^(?P<datetime>.*?)(?P<tzoffset>[+-]\d{4})?$', value)
101
+
102
+ dt = datetime.datetime.strptime(match.group('datetime'), '%Y-%m-%d %H:%M')
103
+
104
+ # Separate the offset into a sign component, hours, and # minutes
105
+ tzoffset = match.group('tzoffset')
106
+ if tzoffset is not None:
107
+ plus_minus_s, rest = tzoffset[0], tzoffset[1:]
108
+ hours_offset_s, mins_offset_s = rest[:2], rest[2:]
109
+
110
+ # Make them all integers
111
+ plus_minus = int(f"{plus_minus_s}1")
112
+ hours_offset = int(hours_offset_s)
113
+ mins_offset = int(mins_offset_s)
114
+
115
+ # Calculate net offset
116
+ net_mins_offset = hours_offset * 60
117
+ net_mins_offset += mins_offset
118
+ net_mins_offset *= plus_minus
119
+
120
+ # Create an offset object
121
+ tzoffset = FixedOffsetTimezone(net_mins_offset)
122
+
123
+ # Store the offset in a datetime object
124
+ dt = dt.replace(tzinfo=tzoffset)
125
+
126
+ return dt
127
+
128
+
129
+ class Message:
130
+ """Representation of a single message in a catalog."""
131
+
132
+ def __init__(
133
+ self,
134
+ id: _MessageID,
135
+ string: _MessageID | None = '',
136
+ locations: Iterable[tuple[str, int]] = (),
137
+ flags: Iterable[str] = (),
138
+ auto_comments: Iterable[str] = (),
139
+ user_comments: Iterable[str] = (),
140
+ previous_id: _MessageID = (),
141
+ lineno: int | None = None,
142
+ context: str | None = None,
143
+ ) -> None:
144
+ """Create the message object.
145
+
146
+ :param id: the message ID, or a ``(singular, plural)`` tuple for
147
+ pluralizable messages
148
+ :param string: the translated message string, or a
149
+ ``(singular, plural)`` tuple for pluralizable messages
150
+ :param locations: a sequence of ``(filename, lineno)`` tuples
151
+ :param flags: a set or sequence of flags
152
+ :param auto_comments: a sequence of automatic comments for the message
153
+ :param user_comments: a sequence of user comments for the message
154
+ :param previous_id: the previous message ID, or a ``(singular, plural)``
155
+ tuple for pluralizable messages
156
+ :param lineno: the line number on which the msgid line was found in the
157
+ PO file, if any
158
+ :param context: the message context
159
+ """
160
+ self.id = id
161
+ if not string and self.pluralizable:
162
+ string = ('', '')
163
+ self.string = string
164
+ self.locations = list(distinct(locations))
165
+ self.flags = set(flags)
166
+ if id and self.python_format:
167
+ self.flags.add('python-format')
168
+ else:
169
+ self.flags.discard('python-format')
170
+ if id and self.python_brace_format:
171
+ self.flags.add('python-brace-format')
172
+ else:
173
+ self.flags.discard('python-brace-format')
174
+ self.auto_comments = list(distinct(auto_comments))
175
+ self.user_comments = list(distinct(user_comments))
176
+ if isinstance(previous_id, str):
177
+ self.previous_id = [previous_id]
178
+ else:
179
+ self.previous_id = list(previous_id)
180
+ self.lineno = lineno
181
+ self.context = context
182
+
183
+ def __repr__(self) -> str:
184
+ return f"<{type(self).__name__} {self.id!r} (flags: {list(self.flags)!r})>"
185
+
186
+ def __cmp__(self, other: object) -> int:
187
+ """Compare Messages, taking into account plural ids"""
188
+ def values_to_compare(obj):
189
+ if isinstance(obj, Message) and obj.pluralizable:
190
+ return obj.id[0], obj.context or ''
191
+ return obj.id, obj.context or ''
192
+ return _cmp(values_to_compare(self), values_to_compare(other))
193
+
194
+ def __gt__(self, other: object) -> bool:
195
+ return self.__cmp__(other) > 0
196
+
197
+ def __lt__(self, other: object) -> bool:
198
+ return self.__cmp__(other) < 0
199
+
200
+ def __ge__(self, other: object) -> bool:
201
+ return self.__cmp__(other) >= 0
202
+
203
+ def __le__(self, other: object) -> bool:
204
+ return self.__cmp__(other) <= 0
205
+
206
+ def __eq__(self, other: object) -> bool:
207
+ return self.__cmp__(other) == 0
208
+
209
+ def __ne__(self, other: object) -> bool:
210
+ return self.__cmp__(other) != 0
211
+
212
+ def is_identical(self, other: Message) -> bool:
213
+ """Checks whether messages are identical, taking into account all
214
+ properties.
215
+ """
216
+ assert isinstance(other, Message)
217
+ return self.__dict__ == other.__dict__
218
+
219
+ def clone(self) -> Message:
220
+ return Message(*map(copy, (self.id, self.string, self.locations,
221
+ self.flags, self.auto_comments,
222
+ self.user_comments, self.previous_id,
223
+ self.lineno, self.context)))
224
+
225
+ def check(self, catalog: Catalog | None = None) -> list[TranslationError]:
226
+ """Run various validation checks on the message. Some validations
227
+ are only performed if the catalog is provided. This method returns
228
+ a sequence of `TranslationError` objects.
229
+
230
+ :rtype: ``iterator``
231
+ :param catalog: A catalog instance that is passed to the checkers
232
+ :see: `Catalog.check` for a way to perform checks for all messages
233
+ in a catalog.
234
+ """
235
+ from babel.messages.checkers import checkers
236
+ errors: list[TranslationError] = []
237
+ for checker in checkers:
238
+ try:
239
+ checker(catalog, self)
240
+ except TranslationError as e:
241
+ errors.append(e)
242
+ return errors
243
+
244
+ @property
245
+ def fuzzy(self) -> bool:
246
+ """Whether the translation is fuzzy.
247
+
248
+ >>> Message('foo').fuzzy
249
+ False
250
+ >>> msg = Message('foo', 'foo', flags=['fuzzy'])
251
+ >>> msg.fuzzy
252
+ True
253
+ >>> msg
254
+ <Message 'foo' (flags: ['fuzzy'])>
255
+
256
+ :type: `bool`"""
257
+ return 'fuzzy' in self.flags
258
+
259
+ @property
260
+ def pluralizable(self) -> bool:
261
+ """Whether the message is plurizable.
262
+
263
+ >>> Message('foo').pluralizable
264
+ False
265
+ >>> Message(('foo', 'bar')).pluralizable
266
+ True
267
+
268
+ :type: `bool`"""
269
+ return isinstance(self.id, (list, tuple))
270
+
271
+ @property
272
+ def python_format(self) -> bool:
273
+ """Whether the message contains Python-style parameters.
274
+
275
+ >>> Message('foo %(name)s bar').python_format
276
+ True
277
+ >>> Message(('foo %(name)s', 'foo %(name)s')).python_format
278
+ True
279
+
280
+ :type: `bool`"""
281
+ ids = self.id
282
+ if not isinstance(ids, (list, tuple)):
283
+ ids = [ids]
284
+ return any(PYTHON_FORMAT.search(id) for id in ids)
285
+
286
+ @property
287
+ def python_brace_format(self) -> bool:
288
+ """Whether the message contains Python f-string parameters.
289
+
290
+ >>> Message('Hello, {name}!').python_brace_format
291
+ True
292
+ >>> Message(('One apple', '{count} apples')).python_brace_format
293
+ True
294
+
295
+ :type: `bool`"""
296
+ ids = self.id
297
+ if not isinstance(ids, (list, tuple)):
298
+ ids = [ids]
299
+ return any(_has_python_brace_format(id) for id in ids)
300
+
301
+
302
+ class TranslationError(Exception):
303
+ """Exception thrown by translation checkers when invalid message
304
+ translations are encountered."""
305
+
306
+
307
+ DEFAULT_HEADER = """\
308
+ # Translations template for PROJECT.
309
+ # Copyright (C) YEAR ORGANIZATION
310
+ # This file is distributed under the same license as the PROJECT project.
311
+ # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
312
+ #"""
313
+
314
+
315
+ def parse_separated_header(value: str) -> dict[str, str]:
316
+ # Adapted from https://peps.python.org/pep-0594/#cgi
317
+ from email.message import Message
318
+ m = Message()
319
+ m['content-type'] = value
320
+ return dict(m.get_params())
321
+
322
+
323
+ def _force_text(s: str | bytes, encoding: str = 'utf-8', errors: str = 'strict') -> str:
324
+ if isinstance(s, str):
325
+ return s
326
+ if isinstance(s, bytes):
327
+ return s.decode(encoding, errors)
328
+ return str(s)
329
+
330
+
331
+ class Catalog:
332
+ """Representation of a message catalog."""
333
+
334
+ def __init__(
335
+ self,
336
+ locale: Locale | str | None = None,
337
+ domain: str | None = None,
338
+ header_comment: str | None = DEFAULT_HEADER,
339
+ project: str | None = None,
340
+ version: str | None = None,
341
+ copyright_holder: str | None = None,
342
+ msgid_bugs_address: str | None = None,
343
+ creation_date: datetime.datetime | str | None = None,
344
+ revision_date: datetime.datetime | datetime.time | float | str | None = None,
345
+ last_translator: str | None = None,
346
+ language_team: str | None = None,
347
+ charset: str | None = None,
348
+ fuzzy: bool = True,
349
+ ) -> None:
350
+ """Initialize the catalog object.
351
+
352
+ :param locale: the locale identifier or `Locale` object, or `None`
353
+ if the catalog is not bound to a locale (which basically
354
+ means it's a template)
355
+ :param domain: the message domain
356
+ :param header_comment: the header comment as string, or `None` for the
357
+ default header
358
+ :param project: the project's name
359
+ :param version: the project's version
360
+ :param copyright_holder: the copyright holder of the catalog
361
+ :param msgid_bugs_address: the email address or URL to submit bug
362
+ reports to
363
+ :param creation_date: the date the catalog was created
364
+ :param revision_date: the date the catalog was revised
365
+ :param last_translator: the name and email of the last translator
366
+ :param language_team: the name and email of the language team
367
+ :param charset: the encoding to use in the output (defaults to utf-8)
368
+ :param fuzzy: the fuzzy bit on the catalog header
369
+ """
370
+ self.domain = domain
371
+ self.locale = locale
372
+ self._header_comment = header_comment
373
+ self._messages: dict[str | tuple[str, str], Message] = {}
374
+
375
+ self.project = project or 'PROJECT'
376
+ self.version = version or 'VERSION'
377
+ self.copyright_holder = copyright_holder or 'ORGANIZATION'
378
+ self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
379
+
380
+ self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
381
+ """Name and email address of the last translator."""
382
+ self.language_team = language_team or 'LANGUAGE <[email protected]>'
383
+ """Name and email address of the language team."""
384
+
385
+ self.charset = charset or 'utf-8'
386
+
387
+ if creation_date is None:
388
+ creation_date = datetime.datetime.now(LOCALTZ)
389
+ elif isinstance(creation_date, datetime.datetime) and not creation_date.tzinfo:
390
+ creation_date = creation_date.replace(tzinfo=LOCALTZ)
391
+ self.creation_date = creation_date
392
+ if revision_date is None:
393
+ revision_date = 'YEAR-MO-DA HO:MI+ZONE'
394
+ elif isinstance(revision_date, datetime.datetime) and not revision_date.tzinfo:
395
+ revision_date = revision_date.replace(tzinfo=LOCALTZ)
396
+ self.revision_date = revision_date
397
+ self.fuzzy = fuzzy
398
+
399
+ # Dictionary of obsolete messages
400
+ self.obsolete: dict[str | tuple[str, str], Message] = {}
401
+ self._num_plurals = None
402
+ self._plural_expr = None
403
+
404
+ def _set_locale(self, locale: Locale | str | None) -> None:
405
+ if locale is None:
406
+ self._locale_identifier = None
407
+ self._locale = None
408
+ return
409
+
410
+ if isinstance(locale, Locale):
411
+ self._locale_identifier = str(locale)
412
+ self._locale = locale
413
+ return
414
+
415
+ if isinstance(locale, str):
416
+ self._locale_identifier = str(locale)
417
+ try:
418
+ self._locale = Locale.parse(locale)
419
+ except UnknownLocaleError:
420
+ self._locale = None
421
+ return
422
+
423
+ raise TypeError(f"`locale` must be a Locale, a locale identifier string, or None; got {locale!r}")
424
+
425
+ def _get_locale(self) -> Locale | None:
426
+ return self._locale
427
+
428
+ def _get_locale_identifier(self) -> str | None:
429
+ return self._locale_identifier
430
+
431
+ locale = property(_get_locale, _set_locale)
432
+ locale_identifier = property(_get_locale_identifier)
433
+
434
+ def _get_header_comment(self) -> str:
435
+ comment = self._header_comment
436
+ year = datetime.datetime.now(LOCALTZ).strftime('%Y')
437
+ if hasattr(self.revision_date, 'strftime'):
438
+ year = self.revision_date.strftime('%Y')
439
+ comment = comment.replace('PROJECT', self.project) \
440
+ .replace('VERSION', self.version) \
441
+ .replace('YEAR', year) \
442
+ .replace('ORGANIZATION', self.copyright_holder)
443
+ locale_name = (self.locale.english_name if self.locale else self.locale_identifier)
444
+ if locale_name:
445
+ comment = comment.replace("Translations template", f"{locale_name} translations")
446
+ return comment
447
+
448
+ def _set_header_comment(self, string: str | None) -> None:
449
+ self._header_comment = string
450
+
451
+ header_comment = property(_get_header_comment, _set_header_comment, doc="""\
452
+ The header comment for the catalog.
453
+
454
+ >>> catalog = Catalog(project='Foobar', version='1.0',
455
+ ... copyright_holder='Foo Company')
456
+ >>> print(catalog.header_comment) #doctest: +ELLIPSIS
457
+ # Translations template for Foobar.
458
+ # Copyright (C) ... Foo Company
459
+ # This file is distributed under the same license as the Foobar project.
460
+ # FIRST AUTHOR <EMAIL@ADDRESS>, ....
461
+ #
462
+
463
+ The header can also be set from a string. Any known upper-case variables
464
+ will be replaced when the header is retrieved again:
465
+
466
+ >>> catalog = Catalog(project='Foobar', version='1.0',
467
+ ... copyright_holder='Foo Company')
468
+ >>> catalog.header_comment = '''\\
469
+ ... # The POT for my really cool PROJECT project.
470
+ ... # Copyright (C) 1990-2003 ORGANIZATION
471
+ ... # This file is distributed under the same license as the PROJECT
472
+ ... # project.
473
+ ... #'''
474
+ >>> print(catalog.header_comment)
475
+ # The POT for my really cool Foobar project.
476
+ # Copyright (C) 1990-2003 Foo Company
477
+ # This file is distributed under the same license as the Foobar
478
+ # project.
479
+ #
480
+
481
+ :type: `unicode`
482
+ """)
483
+
484
+ def _get_mime_headers(self) -> list[tuple[str, str]]:
485
+ if isinstance(self.revision_date, (datetime.datetime, datetime.time, int, float)):
486
+ revision_date = format_datetime(self.revision_date, 'yyyy-MM-dd HH:mmZ', locale='en')
487
+ else:
488
+ revision_date = self.revision_date
489
+
490
+ language_team = self.language_team
491
+ if self.locale_identifier and 'LANGUAGE' in language_team:
492
+ language_team = language_team.replace('LANGUAGE', str(self.locale_identifier))
493
+
494
+ headers: list[tuple[str, str]] = [
495
+ ("Project-Id-Version", f"{self.project} {self.version}"),
496
+ ('Report-Msgid-Bugs-To', self.msgid_bugs_address),
497
+ ('POT-Creation-Date', format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ', locale='en')),
498
+ ('PO-Revision-Date', revision_date),
499
+ ('Last-Translator', self.last_translator),
500
+ ]
501
+ if self.locale_identifier:
502
+ headers.append(('Language', str(self.locale_identifier)))
503
+ headers.append(('Language-Team', language_team))
504
+ if self.locale is not None:
505
+ headers.append(('Plural-Forms', self.plural_forms))
506
+ headers += [
507
+ ('MIME-Version', '1.0'),
508
+ ("Content-Type", f"text/plain; charset={self.charset}"),
509
+ ('Content-Transfer-Encoding', '8bit'),
510
+ ("Generated-By", f"Babel {VERSION}\n"),
511
+ ]
512
+ return headers
513
+
514
+ def _set_mime_headers(self, headers: Iterable[tuple[str, str]]) -> None:
515
+ for name, value in headers:
516
+ name = _force_text(name.lower(), encoding=self.charset)
517
+ value = _force_text(value, encoding=self.charset)
518
+ if name == 'project-id-version':
519
+ parts = value.split(' ')
520
+ self.project = ' '.join(parts[:-1])
521
+ self.version = parts[-1]
522
+ elif name == 'report-msgid-bugs-to':
523
+ self.msgid_bugs_address = value
524
+ elif name == 'last-translator':
525
+ self.last_translator = value
526
+ elif name == 'language':
527
+ value = value.replace('-', '_')
528
+ # The `or None` makes sure that the locale is set to None
529
+ # if the header's value is an empty string, which is what
530
+ # some tools generate (instead of eliding the empty Language
531
+ # header altogether).
532
+ self._set_locale(value or None)
533
+ elif name == 'language-team':
534
+ self.language_team = value
535
+ elif name == 'content-type':
536
+ params = parse_separated_header(value)
537
+ if 'charset' in params:
538
+ self.charset = params['charset'].lower()
539
+ elif name == 'plural-forms':
540
+ params = parse_separated_header(f" ;{value}")
541
+ self._num_plurals = int(params.get('nplurals', 2))
542
+ self._plural_expr = params.get('plural', '(n != 1)')
543
+ elif name == 'pot-creation-date':
544
+ self.creation_date = _parse_datetime_header(value)
545
+ elif name == 'po-revision-date':
546
+ # Keep the value if it's not the default one
547
+ if 'YEAR' not in value:
548
+ self.revision_date = _parse_datetime_header(value)
549
+
550
+ mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
551
+ The MIME headers of the catalog, used for the special ``msgid ""`` entry.
552
+
553
+ The behavior of this property changes slightly depending on whether a locale
554
+ is set or not, the latter indicating that the catalog is actually a template
555
+ for actual translations.
556
+
557
+ Here's an example of the output for such a catalog template:
558
+
559
+ >>> from babel.dates import UTC
560
+ >>> from datetime import datetime
561
+ >>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
562
+ >>> catalog = Catalog(project='Foobar', version='1.0',
563
+ ... creation_date=created)
564
+ >>> for name, value in catalog.mime_headers:
565
+ ... print('%s: %s' % (name, value))
566
+ Project-Id-Version: Foobar 1.0
567
+ Report-Msgid-Bugs-To: EMAIL@ADDRESS
568
+ POT-Creation-Date: 1990-04-01 15:30+0000
569
+ PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
570
+ Last-Translator: FULL NAME <EMAIL@ADDRESS>
571
+ Language-Team: LANGUAGE <[email protected]>
572
+ MIME-Version: 1.0
573
+ Content-Type: text/plain; charset=utf-8
574
+ Content-Transfer-Encoding: 8bit
575
+ Generated-By: Babel ...
576
+
577
+ And here's an example of the output when the locale is set:
578
+
579
+ >>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)
580
+ >>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',
581
+ ... creation_date=created, revision_date=revised,
582
+ ... last_translator='John Doe <[email protected]>',
583
+ ... language_team='de_DE <[email protected]>')
584
+ >>> for name, value in catalog.mime_headers:
585
+ ... print('%s: %s' % (name, value))
586
+ Project-Id-Version: Foobar 1.0
587
+ Report-Msgid-Bugs-To: EMAIL@ADDRESS
588
+ POT-Creation-Date: 1990-04-01 15:30+0000
589
+ PO-Revision-Date: 1990-08-03 12:00+0000
590
+ Last-Translator: John Doe <[email protected]>
591
+ Language: de_DE
592
+ Language-Team: de_DE <[email protected]>
593
+ Plural-Forms: nplurals=2; plural=(n != 1);
594
+ MIME-Version: 1.0
595
+ Content-Type: text/plain; charset=utf-8
596
+ Content-Transfer-Encoding: 8bit
597
+ Generated-By: Babel ...
598
+
599
+ :type: `list`
600
+ """)
601
+
602
+ @property
603
+ def num_plurals(self) -> int:
604
+ """The number of plurals used by the catalog or locale.
605
+
606
+ >>> Catalog(locale='en').num_plurals
607
+ 2
608
+ >>> Catalog(locale='ga').num_plurals
609
+ 5
610
+
611
+ :type: `int`"""
612
+ if self._num_plurals is None:
613
+ num = 2
614
+ if self.locale:
615
+ num = get_plural(self.locale)[0]
616
+ self._num_plurals = num
617
+ return self._num_plurals
618
+
619
+ @property
620
+ def plural_expr(self) -> str:
621
+ """The plural expression used by the catalog or locale.
622
+
623
+ >>> Catalog(locale='en').plural_expr
624
+ '(n != 1)'
625
+ >>> Catalog(locale='ga').plural_expr
626
+ '(n==1 ? 0 : n==2 ? 1 : n>=3 && n<=6 ? 2 : n>=7 && n<=10 ? 3 : 4)'
627
+ >>> Catalog(locale='ding').plural_expr # unknown locale
628
+ '(n != 1)'
629
+
630
+ :type: `str`"""
631
+ if self._plural_expr is None:
632
+ expr = '(n != 1)'
633
+ if self.locale:
634
+ expr = get_plural(self.locale)[1]
635
+ self._plural_expr = expr
636
+ return self._plural_expr
637
+
638
+ @property
639
+ def plural_forms(self) -> str:
640
+ """Return the plural forms declaration for the locale.
641
+
642
+ >>> Catalog(locale='en').plural_forms
643
+ 'nplurals=2; plural=(n != 1);'
644
+ >>> Catalog(locale='pt_BR').plural_forms
645
+ 'nplurals=2; plural=(n > 1);'
646
+
647
+ :type: `str`"""
648
+ return f"nplurals={self.num_plurals}; plural={self.plural_expr};"
649
+
650
+ def __contains__(self, id: _MessageID) -> bool:
651
+ """Return whether the catalog has a message with the specified ID."""
652
+ return self._key_for(id) in self._messages
653
+
654
+ def __len__(self) -> int:
655
+ """The number of messages in the catalog.
656
+
657
+ This does not include the special ``msgid ""`` entry."""
658
+ return len(self._messages)
659
+
660
+ def __iter__(self) -> Iterator[Message]:
661
+ """Iterates through all the entries in the catalog, in the order they
662
+ were added, yielding a `Message` object for every entry.
663
+
664
+ :rtype: ``iterator``"""
665
+ buf = []
666
+ for name, value in self.mime_headers:
667
+ buf.append(f"{name}: {value}")
668
+ flags = set()
669
+ if self.fuzzy:
670
+ flags |= {'fuzzy'}
671
+ yield Message('', '\n'.join(buf), flags=flags)
672
+ for key in self._messages:
673
+ yield self._messages[key]
674
+
675
+ def __repr__(self) -> str:
676
+ locale = ''
677
+ if self.locale:
678
+ locale = f" {self.locale}"
679
+ return f"<{type(self).__name__} {self.domain!r}{locale}>"
680
+
681
+ def __delitem__(self, id: _MessageID) -> None:
682
+ """Delete the message with the specified ID."""
683
+ self.delete(id)
684
+
685
+ def __getitem__(self, id: _MessageID) -> Message:
686
+ """Return the message with the specified ID.
687
+
688
+ :param id: the message ID
689
+ """
690
+ return self.get(id)
691
+
692
+ def __setitem__(self, id: _MessageID, message: Message) -> None:
693
+ """Add or update the message with the specified ID.
694
+
695
+ >>> catalog = Catalog()
696
+ >>> catalog[u'foo'] = Message(u'foo')
697
+ >>> catalog[u'foo']
698
+ <Message u'foo' (flags: [])>
699
+
700
+ If a message with that ID is already in the catalog, it is updated
701
+ to include the locations and flags of the new message.
702
+
703
+ >>> catalog = Catalog()
704
+ >>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
705
+ >>> catalog[u'foo'].locations
706
+ [('main.py', 1)]
707
+ >>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
708
+ >>> catalog[u'foo'].locations
709
+ [('main.py', 1), ('utils.py', 5)]
710
+
711
+ :param id: the message ID
712
+ :param message: the `Message` object
713
+ """
714
+ assert isinstance(message, Message), 'expected a Message object'
715
+ key = self._key_for(id, message.context)
716
+ current = self._messages.get(key)
717
+ if current:
718
+ if message.pluralizable and not current.pluralizable:
719
+ # The new message adds pluralization
720
+ current.id = message.id
721
+ current.string = message.string
722
+ current.locations = list(distinct(current.locations +
723
+ message.locations))
724
+ current.auto_comments = list(distinct(current.auto_comments +
725
+ message.auto_comments))
726
+ current.user_comments = list(distinct(current.user_comments +
727
+ message.user_comments))
728
+ current.flags |= message.flags
729
+ elif id == '':
730
+ # special treatment for the header message
731
+ self.mime_headers = message_from_string(message.string).items()
732
+ self.header_comment = "\n".join([f"# {c}".rstrip() for c in message.user_comments])
733
+ self.fuzzy = message.fuzzy
734
+ else:
735
+ if isinstance(id, (list, tuple)):
736
+ assert isinstance(message.string, (list, tuple)), \
737
+ f"Expected sequence but got {type(message.string)}"
738
+ self._messages[key] = message
739
+
740
+ def add(
741
+ self,
742
+ id: _MessageID,
743
+ string: _MessageID | None = None,
744
+ locations: Iterable[tuple[str, int]] = (),
745
+ flags: Iterable[str] = (),
746
+ auto_comments: Iterable[str] = (),
747
+ user_comments: Iterable[str] = (),
748
+ previous_id: _MessageID = (),
749
+ lineno: int | None = None,
750
+ context: str | None = None,
751
+ ) -> Message:
752
+ """Add or update the message with the specified ID.
753
+
754
+ >>> catalog = Catalog()
755
+ >>> catalog.add(u'foo')
756
+ <Message ...>
757
+ >>> catalog[u'foo']
758
+ <Message u'foo' (flags: [])>
759
+
760
+ This method simply constructs a `Message` object with the given
761
+ arguments and invokes `__setitem__` with that object.
762
+
763
+ :param id: the message ID, or a ``(singular, plural)`` tuple for
764
+ pluralizable messages
765
+ :param string: the translated message string, or a
766
+ ``(singular, plural)`` tuple for pluralizable messages
767
+ :param locations: a sequence of ``(filename, lineno)`` tuples
768
+ :param flags: a set or sequence of flags
769
+ :param auto_comments: a sequence of automatic comments
770
+ :param user_comments: a sequence of user comments
771
+ :param previous_id: the previous message ID, or a ``(singular, plural)``
772
+ tuple for pluralizable messages
773
+ :param lineno: the line number on which the msgid line was found in the
774
+ PO file, if any
775
+ :param context: the message context
776
+ """
777
+ message = Message(id, string, list(locations), flags, auto_comments,
778
+ user_comments, previous_id, lineno=lineno,
779
+ context=context)
780
+ self[id] = message
781
+ return message
782
+
783
+ def check(self) -> Iterable[tuple[Message, list[TranslationError]]]:
784
+ """Run various validation checks on the translations in the catalog.
785
+
786
+ For every message which fails validation, this method yield a
787
+ ``(message, errors)`` tuple, where ``message`` is the `Message` object
788
+ and ``errors`` is a sequence of `TranslationError` objects.
789
+
790
+ :rtype: ``generator`` of ``(message, errors)``
791
+ """
792
+ for message in self._messages.values():
793
+ errors = message.check(catalog=self)
794
+ if errors:
795
+ yield message, errors
796
+
797
+ def get(self, id: _MessageID, context: str | None = None) -> Message | None:
798
+ """Return the message with the specified ID and context.
799
+
800
+ :param id: the message ID
801
+ :param context: the message context, or ``None`` for no context
802
+ """
803
+ return self._messages.get(self._key_for(id, context))
804
+
805
+ def delete(self, id: _MessageID, context: str | None = None) -> None:
806
+ """Delete the message with the specified ID and context.
807
+
808
+ :param id: the message ID
809
+ :param context: the message context, or ``None`` for no context
810
+ """
811
+ key = self._key_for(id, context)
812
+ if key in self._messages:
813
+ del self._messages[key]
814
+
815
+ def update(
816
+ self,
817
+ template: Catalog,
818
+ no_fuzzy_matching: bool = False,
819
+ update_header_comment: bool = False,
820
+ keep_user_comments: bool = True,
821
+ update_creation_date: bool = True,
822
+ ) -> None:
823
+ """Update the catalog based on the given template catalog.
824
+
825
+ >>> from babel.messages import Catalog
826
+ >>> template = Catalog()
827
+ >>> template.add('green', locations=[('main.py', 99)])
828
+ <Message ...>
829
+ >>> template.add('blue', locations=[('main.py', 100)])
830
+ <Message ...>
831
+ >>> template.add(('salad', 'salads'), locations=[('util.py', 42)])
832
+ <Message ...>
833
+ >>> catalog = Catalog(locale='de_DE')
834
+ >>> catalog.add('blue', u'blau', locations=[('main.py', 98)])
835
+ <Message ...>
836
+ >>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])
837
+ <Message ...>
838
+ >>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
839
+ ... locations=[('util.py', 38)])
840
+ <Message ...>
841
+
842
+ >>> catalog.update(template)
843
+ >>> len(catalog)
844
+ 3
845
+
846
+ >>> msg1 = catalog['green']
847
+ >>> msg1.string
848
+ >>> msg1.locations
849
+ [('main.py', 99)]
850
+
851
+ >>> msg2 = catalog['blue']
852
+ >>> msg2.string
853
+ u'blau'
854
+ >>> msg2.locations
855
+ [('main.py', 100)]
856
+
857
+ >>> msg3 = catalog['salad']
858
+ >>> msg3.string
859
+ (u'Salat', u'Salate')
860
+ >>> msg3.locations
861
+ [('util.py', 42)]
862
+
863
+ Messages that are in the catalog but not in the template are removed
864
+ from the main collection, but can still be accessed via the `obsolete`
865
+ member:
866
+
867
+ >>> 'head' in catalog
868
+ False
869
+ >>> list(catalog.obsolete.values())
870
+ [<Message 'head' (flags: [])>]
871
+
872
+ :param template: the reference catalog, usually read from a POT file
873
+ :param no_fuzzy_matching: whether to use fuzzy matching of message IDs
874
+ :param update_header_comment: whether to copy the header comment from the template
875
+ :param keep_user_comments: whether to keep user comments from the old catalog
876
+ :param update_creation_date: whether to copy the creation date from the template
877
+ """
878
+ messages = self._messages
879
+ remaining = messages.copy()
880
+ self._messages = {}
881
+
882
+ # Prepare for fuzzy matching
883
+ fuzzy_candidates = {}
884
+ if not no_fuzzy_matching:
885
+ for msgid in messages:
886
+ if msgid and messages[msgid].string:
887
+ key = self._key_for(msgid)
888
+ ctxt = messages[msgid].context
889
+ fuzzy_candidates[self._to_fuzzy_match_key(key)] = (key, ctxt)
890
+ fuzzy_matches = set()
891
+
892
+ def _merge(message: Message, oldkey: tuple[str, str] | str, newkey: tuple[str, str] | str) -> None:
893
+ message = message.clone()
894
+ fuzzy = False
895
+ if oldkey != newkey:
896
+ fuzzy = True
897
+ fuzzy_matches.add(oldkey)
898
+ oldmsg = messages.get(oldkey)
899
+ assert oldmsg is not None
900
+ if isinstance(oldmsg.id, str):
901
+ message.previous_id = [oldmsg.id]
902
+ else:
903
+ message.previous_id = list(oldmsg.id)
904
+ else:
905
+ oldmsg = remaining.pop(oldkey, None)
906
+ assert oldmsg is not None
907
+ message.string = oldmsg.string
908
+
909
+ if keep_user_comments:
910
+ message.user_comments = list(distinct(oldmsg.user_comments))
911
+
912
+ if isinstance(message.id, (list, tuple)):
913
+ if not isinstance(message.string, (list, tuple)):
914
+ fuzzy = True
915
+ message.string = tuple(
916
+ [message.string] + ([''] * (len(message.id) - 1)),
917
+ )
918
+ elif len(message.string) != self.num_plurals:
919
+ fuzzy = True
920
+ message.string = tuple(message.string[:len(oldmsg.string)])
921
+ elif isinstance(message.string, (list, tuple)):
922
+ fuzzy = True
923
+ message.string = message.string[0]
924
+ message.flags |= oldmsg.flags
925
+ if fuzzy:
926
+ message.flags |= {'fuzzy'}
927
+ self[message.id] = message
928
+
929
+ for message in template:
930
+ if message.id:
931
+ key = self._key_for(message.id, message.context)
932
+ if key in messages:
933
+ _merge(message, key, key)
934
+ else:
935
+ if not no_fuzzy_matching:
936
+ # do some fuzzy matching with difflib
937
+ matches = get_close_matches(
938
+ self._to_fuzzy_match_key(key),
939
+ fuzzy_candidates.keys(),
940
+ 1,
941
+ )
942
+ if matches:
943
+ modified_key = matches[0]
944
+ newkey, newctxt = fuzzy_candidates[modified_key]
945
+ if newctxt is not None:
946
+ newkey = newkey, newctxt
947
+ _merge(message, newkey, key)
948
+ continue
949
+
950
+ self[message.id] = message
951
+
952
+ for msgid in remaining:
953
+ if no_fuzzy_matching or msgid not in fuzzy_matches:
954
+ self.obsolete[msgid] = remaining[msgid]
955
+
956
+ if update_header_comment:
957
+ # Allow the updated catalog's header to be rewritten based on the
958
+ # template's header
959
+ self.header_comment = template.header_comment
960
+
961
+ # Make updated catalog's POT-Creation-Date equal to the template
962
+ # used to update the catalog
963
+ if update_creation_date:
964
+ self.creation_date = template.creation_date
965
+
966
+ def _to_fuzzy_match_key(self, key: tuple[str, str] | str) -> str:
967
+ """Converts a message key to a string suitable for fuzzy matching."""
968
+ if isinstance(key, tuple):
969
+ matchkey = key[0] # just the msgid, no context
970
+ else:
971
+ matchkey = key
972
+ return matchkey.lower().strip()
973
+
974
+ def _key_for(self, id: _MessageID, context: str | None = None) -> tuple[str, str] | str:
975
+ """The key for a message is just the singular ID even for pluralizable
976
+ messages, but is a ``(msgid, msgctxt)`` tuple for context-specific
977
+ messages.
978
+ """
979
+ key = id
980
+ if isinstance(key, (list, tuple)):
981
+ key = id[0]
982
+ if context is not None:
983
+ key = (key, context)
984
+ return key
985
+
986
+ def is_identical(self, other: Catalog) -> bool:
987
+ """Checks if catalogs are identical, taking into account messages and
988
+ headers.
989
+ """
990
+ assert isinstance(other, Catalog)
991
+ for key in self._messages.keys() | other._messages.keys():
992
+ message_1 = self.get(key)
993
+ message_2 = other.get(key)
994
+ if (
995
+ message_1 is None
996
+ or message_2 is None
997
+ or not message_1.is_identical(message_2)
998
+ ):
999
+ return False
1000
+ return dict(self.mime_headers) == dict(other.mime_headers)
lib/python3.10/site-packages/babel/messages/checkers.py ADDED
@@ -0,0 +1,168 @@
1
+ """
2
+ babel.messages.checkers
3
+ ~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Various routines that help with validation of translations.
6
+
7
+ :since: version 0.9
8
+
9
+ :copyright: (c) 2013-2025 by the Babel Team.
10
+ :license: BSD, see LICENSE for more details.
11
+ """
12
+ from __future__ import annotations
13
+
14
+ from collections.abc import Callable
15
+
16
+ from babel.messages.catalog import PYTHON_FORMAT, Catalog, Message, TranslationError
17
+
18
+ #: list of format chars that are compatible to each other
19
+ _string_format_compatibilities = [
20
+ {'i', 'd', 'u'},
21
+ {'x', 'X'},
22
+ {'f', 'F', 'g', 'G'},
23
+ ]
24
+
25
+
26
+ def num_plurals(catalog: Catalog | None, message: Message) -> None:
27
+ """Verify the number of plurals in the translation."""
28
+ if not message.pluralizable:
29
+ if not isinstance(message.string, str):
30
+ raise TranslationError("Found plural forms for non-pluralizable "
31
+ "message")
32
+ return
33
+
34
+ # skip further tests if no catalog is provided.
35
+ elif catalog is None:
36
+ return
37
+
38
+ msgstrs = message.string
39
+ if not isinstance(msgstrs, (list, tuple)):
40
+ msgstrs = (msgstrs,)
41
+ if len(msgstrs) != catalog.num_plurals:
42
+ raise TranslationError("Wrong number of plural forms (expected %d)" %
43
+ catalog.num_plurals)
44
+
45
+
46
+ def python_format(catalog: Catalog | None, message: Message) -> None:
47
+ """Verify the format string placeholders in the translation."""
48
+ if 'python-format' not in message.flags:
49
+ return
50
+ msgids = message.id
51
+ if not isinstance(msgids, (list, tuple)):
52
+ msgids = (msgids,)
53
+ msgstrs = message.string
54
+ if not isinstance(msgstrs, (list, tuple)):
55
+ msgstrs = (msgstrs,)
56
+
57
+ for msgid, msgstr in zip(msgids, msgstrs):
58
+ if msgstr:
59
+ _validate_format(msgid, msgstr)
60
+
61
+
62
+ def _validate_format(format: str, alternative: str) -> None:
63
+ """Test format string `alternative` against `format`. `format` can be the
64
+ msgid of a message and `alternative` one of the `msgstr`\\s. The two
65
+ arguments are not interchangeable as `alternative` may contain fewer
66
+ placeholders if `format` uses named placeholders.
67
+
68
+ If the string formatting of `alternative` is compatible with `format`, the
69
+ function returns `None`; otherwise a `TranslationError` is raised.
70
+
71
+ Examples for compatible format strings:
72
+
73
+ >>> _validate_format('Hello %s!', 'Hallo %s!')
74
+ >>> _validate_format('Hello %i!', 'Hallo %d!')
75
+
76
+ Example of incompatible format strings:
77
+
78
+ >>> _validate_format('Hello %(name)s!', 'Hallo %s!')
79
+ Traceback (most recent call last):
80
+ ...
81
+ TranslationError: the format strings are of different kinds
82
+
83
+ This function is used by the `python_format` checker.
84
+
85
+ :param format: The original format string
86
+ :param alternative: The alternative format string that should be checked
87
+ against format
88
+ :raises TranslationError: on formatting errors
89
+ """
90
+
91
+ def _parse(string: str) -> list[tuple[str, str]]:
92
+ result: list[tuple[str, str]] = []
93
+ for match in PYTHON_FORMAT.finditer(string):
94
+ name, format, typechar = match.groups()
95
+ if typechar == '%' and name is None:
96
+ continue
97
+ result.append((name, str(typechar)))
98
+ return result
99
+
100
+ def _compatible(a: str, b: str) -> bool:
101
+ if a == b:
102
+ return True
103
+ for set in _string_format_compatibilities:
104
+ if a in set and b in set:
105
+ return True
106
+ return False
107
+
108
+ def _check_positional(results: list[tuple[str, str]]) -> bool:
109
+ positional = None
110
+ for name, _char in results:
111
+ if positional is None:
112
+ positional = name is None
113
+ else:
114
+ if (name is None) != positional:
115
+ raise TranslationError('format string mixes positional '
116
+ 'and named placeholders')
117
+ return bool(positional)
118
+
119
+ a, b = map(_parse, (format, alternative))
120
+
121
+ if not a:
122
+ return
123
+
124
+ # now check if both strings are positional or named
125
+ a_positional, b_positional = map(_check_positional, (a, b))
126
+ if a_positional and not b_positional and not b:
127
+ raise TranslationError('placeholders are incompatible')
128
+ elif a_positional != b_positional:
129
+ raise TranslationError('the format strings are of different kinds')
130
+
131
+ # if we are operating on positional strings both must have the
132
+ # same number of format chars and those must be compatible
133
+ if a_positional:
134
+ if len(a) != len(b):
135
+ raise TranslationError('positional format placeholders are '
136
+ 'unbalanced')
137
+ for idx, ((_, first), (_, second)) in enumerate(zip(a, b)):
138
+ if not _compatible(first, second):
139
+ raise TranslationError('incompatible format for placeholder '
140
+ '%d: %r and %r are not compatible' %
141
+ (idx + 1, first, second))
142
+
143
+ # otherwise the second string must not have names the first one
144
+ # doesn't have and the types of those included must be compatible
145
+ else:
146
+ type_map = dict(a)
147
+ for name, typechar in b:
148
+ if name not in type_map:
149
+ raise TranslationError(f'unknown named placeholder {name!r}')
150
+ elif not _compatible(typechar, type_map[name]):
151
+ raise TranslationError(
152
+ f'incompatible format for placeholder {name!r}: '
153
+ f'{typechar!r} and {type_map[name]!r} are not compatible',
154
+ )
155
+
156
+
157
+ def _find_checkers() -> list[Callable[[Catalog | None, Message], object]]:
158
+ from babel.messages._compat import find_entrypoints
159
+ checkers: list[Callable[[Catalog | None, Message], object]] = []
160
+ checkers.extend(load() for (name, load) in find_entrypoints('babel.checkers'))
161
+ if len(checkers) == 0:
162
+ # if entrypoints are not available or no usable egg-info was found
163
+ # (see #230), just resort to hard-coded checkers
164
+ return [num_plurals, python_format]
165
+ return checkers
166
+
167
+
168
+ checkers: list[Callable[[Catalog | None, Message], object]] = _find_checkers()
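+
+ # Example usage (illustrative sketch): every checker takes a `Catalog` (or
+ # None) and a `Message` and raises `TranslationError` when it finds a
+ # problem, so the collected `checkers` list can be run over a catalog:
+ #
+ #     from babel.messages.catalog import TranslationError
+ #     from babel.messages.checkers import checkers
+ #
+ #     def run_checks(catalog):
+ #         problems = []
+ #         for message in catalog:
+ #             for check in checkers:
+ #                 try:
+ #                     check(catalog, message)
+ #                 except TranslationError as exc:
+ #                     problems.append((message.id, str(exc)))
+ #         return problems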
lib/python3.10/site-packages/babel/messages/extract.py ADDED
@@ -0,0 +1,852 @@
1
+ """
2
+ babel.messages.extract
3
+ ~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Basic infrastructure for extracting localizable messages from source files.
6
+
7
+ This module defines an extensible system for collecting localizable message
8
+ strings from a variety of sources. A native extractor for Python source
9
+ files is built in; extractors for other sources can be added using very
10
+ simple plugins.
11
+
12
+ The main entry points into the extraction functionality are the functions
13
+ `extract_from_dir` and `extract_from_file`.
14
+
15
+ :copyright: (c) 2013-2025 by the Babel Team.
16
+ :license: BSD, see LICENSE for more details.
17
+ """
18
+ from __future__ import annotations
19
+
20
+ import ast
21
+ import io
22
+ import os
23
+ import sys
24
+ import tokenize
25
+ from collections.abc import (
26
+ Callable,
27
+ Collection,
28
+ Generator,
29
+ Iterable,
30
+ Mapping,
31
+ MutableSequence,
32
+ )
33
+ from functools import lru_cache
34
+ from os.path import relpath
35
+ from textwrap import dedent
36
+ from tokenize import COMMENT, NAME, NL, OP, STRING, generate_tokens
37
+ from typing import TYPE_CHECKING, Any, TypedDict
38
+
39
+ from babel.messages._compat import find_entrypoints
40
+ from babel.util import parse_encoding, parse_future_flags, pathmatch
41
+
42
+ if TYPE_CHECKING:
43
+ from typing import IO, Final, Protocol
44
+
45
+ from _typeshed import SupportsItems, SupportsRead, SupportsReadline
46
+ from typing_extensions import TypeAlias
47
+
48
+ class _PyOptions(TypedDict, total=False):
49
+ encoding: str
50
+
51
+ class _JSOptions(TypedDict, total=False):
52
+ encoding: str
53
+ jsx: bool
54
+ template_string: bool
55
+ parse_template_string: bool
56
+
57
+ class _FileObj(SupportsRead[bytes], SupportsReadline[bytes], Protocol):
58
+ def seek(self, __offset: int, __whence: int = ...) -> int: ...
59
+ def tell(self) -> int: ...
60
+
61
+ _SimpleKeyword: TypeAlias = tuple[int | tuple[int, int] | tuple[int, str], ...] | None
62
+ _Keyword: TypeAlias = dict[int | None, _SimpleKeyword] | _SimpleKeyword
63
+
64
+ # 5-tuple of (filename, lineno, messages, comments, context)
65
+ _FileExtractionResult: TypeAlias = tuple[str, int, str | tuple[str, ...], list[str], str | None]
66
+
67
+ # 4-tuple of (lineno, message, comments, context)
68
+ _ExtractionResult: TypeAlias = tuple[int, str | tuple[str, ...], list[str], str | None]
69
+
70
+ # Required arguments: fileobj, keywords, comment_tags, options
71
+ # Return value: Iterable of (lineno, message, comments, context)
72
+ _CallableExtractionMethod: TypeAlias = Callable[
73
+ [_FileObj | IO[bytes], Mapping[str, _Keyword], Collection[str], Mapping[str, Any]],
74
+ Iterable[_ExtractionResult],
75
+ ]
76
+
77
+ _ExtractionMethod: TypeAlias = _CallableExtractionMethod | str
78
+
79
+ GROUP_NAME: Final[str] = 'babel.extractors'
80
+
81
+ DEFAULT_KEYWORDS: dict[str, _Keyword] = {
82
+ '_': None,
83
+ 'gettext': None,
84
+ 'ngettext': (1, 2),
85
+ 'ugettext': None,
86
+ 'ungettext': (1, 2),
87
+ 'dgettext': (2,),
88
+ 'dngettext': (2, 3),
89
+ 'N_': None,
90
+ 'pgettext': ((1, 'c'), 2),
91
+ 'npgettext': ((1, 'c'), 2, 3),
92
+ }
93
+
94
+ DEFAULT_MAPPING: list[tuple[str, str]] = [('**.py', 'python')]
95
+
96
+ # New tokens in Python 3.12, or None on older versions
97
+ FSTRING_START = getattr(tokenize, "FSTRING_START", None)
98
+ FSTRING_MIDDLE = getattr(tokenize, "FSTRING_MIDDLE", None)
99
+ FSTRING_END = getattr(tokenize, "FSTRING_END", None)
100
+
101
+
102
+ def _strip_comment_tags(comments: MutableSequence[str], tags: Iterable[str]):
103
+ """Helper function for `extract` that strips comment tags from strings
104
+ in a list of comment lines. This function operates in-place.
105
+ """
106
+ def _strip(line: str):
107
+ for tag in tags:
108
+ if line.startswith(tag):
109
+ return line[len(tag):].strip()
110
+ return line
111
+ comments[:] = map(_strip, comments)
112
+
113
+
114
+ def default_directory_filter(dirpath: str | os.PathLike[str]) -> bool:
115
+ subdir = os.path.basename(dirpath)
116
+ # Legacy default behavior: ignore dot and underscore directories
117
+ return not (subdir.startswith('.') or subdir.startswith('_'))
118
+
119
+
120
+ def extract_from_dir(
121
+ dirname: str | os.PathLike[str] | None = None,
122
+ method_map: Iterable[tuple[str, str]] = DEFAULT_MAPPING,
123
+ options_map: SupportsItems[str, dict[str, Any]] | None = None,
124
+ keywords: Mapping[str, _Keyword] = DEFAULT_KEYWORDS,
125
+ comment_tags: Collection[str] = (),
126
+ callback: Callable[[str, str, dict[str, Any]], object] | None = None,
127
+ strip_comment_tags: bool = False,
128
+ directory_filter: Callable[[str], bool] | None = None,
129
+ ) -> Generator[_FileExtractionResult, None, None]:
130
+ """Extract messages from any source files found in the given directory.
131
+
132
+ This function generates tuples of the form ``(filename, lineno, message,
133
+ comments, context)``.
134
+
135
+ Which extraction method is used per file is determined by the `method_map`
136
+ parameter, which maps extended glob patterns to extraction method names.
137
+ For example, the following is the default mapping:
138
+
139
+ >>> method_map = [
140
+ ... ('**.py', 'python')
141
+ ... ]
142
+
143
+ This basically says that files with the filename extension ".py" at any
144
+ level inside the directory should be processed by the "python" extraction
145
+ method. Files that don't match any of the mapping patterns are ignored. See
146
+ the documentation of the `pathmatch` function for details on the pattern
147
+ syntax.
148
+
149
+ The following extended mapping would also use the "genshi" extraction
150
+ method on any file in a "templates" subdirectory:
151
+
152
+ >>> method_map = [
153
+ ... ('**/templates/**.*', 'genshi'),
154
+ ... ('**.py', 'python')
155
+ ... ]
156
+
157
+ The dictionary provided by the optional `options_map` parameter augments
158
+ these mappings. It uses extended glob patterns as keys, and the values are
159
+ dictionaries mapping option names to option values (both strings).
160
+
161
+ The glob patterns of the `options_map` do not necessarily need to be the
162
+ same as those used in the method mapping. For example, while all files in
163
+ the ``templates`` folders in an application may be Genshi templates, the
164
+ options for those files may differ based on extension:
165
+
166
+ >>> options_map = {
167
+ ... '**/templates/**.txt': {
168
+ ... 'template_class': 'genshi.template:TextTemplate',
169
+ ... 'encoding': 'latin-1'
170
+ ... },
171
+ ... '**/templates/**.html': {
172
+ ... 'include_attrs': ''
173
+ ... }
174
+ ... }
175
+
176
+ :param dirname: the path to the directory to extract messages from. If
177
+ not given the current working directory is used.
178
+ :param method_map: a list of ``(pattern, method)`` tuples mapping extended
179
+ glob patterns to extraction method names
180
+ :param options_map: a dictionary of additional options (optional)
181
+ :param keywords: a dictionary mapping keywords (i.e. names of functions
182
+ that should be recognized as translation functions) to
183
+ tuples that specify which of their arguments contain
184
+ localizable strings
185
+ :param comment_tags: a list of tags of translator comments to search for
186
+ and include in the results
187
+ :param callback: a function that is called for every file that messages are
188
+ extracted from, just before the extraction itself is
189
+ performed; the function is passed the filename, the name
190
+ of the extraction method and the options dictionary as
191
+ positional arguments, in that order
192
+ :param strip_comment_tags: a flag that if set to `True` causes all comment
193
+ tags to be removed from the collected comments.
194
+ :param directory_filter: a callback to determine whether a directory should
195
+ be recursed into. Receives the full directory path;
196
+ should return True if the directory is valid.
197
+ :see: `pathmatch`
198
+ """
199
+ if dirname is None:
200
+ dirname = os.getcwd()
201
+ if options_map is None:
202
+ options_map = {}
203
+ if directory_filter is None:
204
+ directory_filter = default_directory_filter
205
+
206
+ absname = os.path.abspath(dirname)
207
+ for root, dirnames, filenames in os.walk(absname):
208
+ dirnames[:] = [
209
+ subdir for subdir in dirnames
210
+ if directory_filter(os.path.join(root, subdir))
211
+ ]
212
+ dirnames.sort()
213
+ filenames.sort()
214
+ for filename in filenames:
215
+ filepath = os.path.join(root, filename).replace(os.sep, '/')
216
+
217
+ yield from check_and_call_extract_file(
218
+ filepath,
219
+ method_map,
220
+ options_map,
221
+ callback,
222
+ keywords,
223
+ comment_tags,
224
+ strip_comment_tags,
225
+ dirpath=absname,
226
+ )
227
+
228
+
229
+ def check_and_call_extract_file(
230
+ filepath: str | os.PathLike[str],
231
+ method_map: Iterable[tuple[str, str]],
232
+ options_map: SupportsItems[str, dict[str, Any]],
233
+ callback: Callable[[str, str, dict[str, Any]], object] | None,
234
+ keywords: Mapping[str, _Keyword],
235
+ comment_tags: Collection[str],
236
+ strip_comment_tags: bool,
237
+ dirpath: str | os.PathLike[str] | None = None,
238
+ ) -> Generator[_FileExtractionResult, None, None]:
239
+ """Checks if the given file matches an extraction method mapping, and if so, calls extract_from_file.
240
+
241
+ Note that the extraction method mappings are based relative to dirpath.
242
+ So, given an absolute path to a file `filepath`, we want to check using
243
+ just the relative path from `dirpath` to `filepath`.
244
+
245
+ Yields 5-tuples (filename, lineno, messages, comments, context).
246
+
247
+ :param filepath: An absolute path to a file that exists.
248
+ :param method_map: a list of ``(pattern, method)`` tuples mapping extended
249
+ glob patterns to extraction method names
250
+ :param options_map: a dictionary of additional options (optional)
251
+ :param callback: a function that is called for every file that messages are
252
+ extracted from, just before the extraction itself is
253
+ performed; the function is passed the filename, the name
254
+ of the extraction method and the options dictionary as
255
+ positional arguments, in that order
256
+ :param keywords: a dictionary mapping keywords (i.e. names of functions
257
+ that should be recognized as translation functions) to
258
+ tuples that specify which of their arguments contain
259
+ localizable strings
260
+ :param comment_tags: a list of tags of translator comments to search for
261
+ and include in the results
262
+ :param strip_comment_tags: a flag that if set to `True` causes all comment
263
+ tags to be removed from the collected comments.
264
+ :param dirpath: the path to the directory to extract messages from.
265
+ :return: iterable of 5-tuples (filename, lineno, messages, comments, context)
266
+ :rtype: Iterable[tuple[str, int, str|tuple[str], list[str], str|None]]
267
+ """
268
+ # filename is the relative path from dirpath to the actual file
269
+ filename = relpath(filepath, dirpath)
270
+
271
+ for pattern, method in method_map:
272
+ if not pathmatch(pattern, filename):
273
+ continue
274
+
275
+ options = {}
276
+ for opattern, odict in options_map.items():
277
+ if pathmatch(opattern, filename):
278
+ options = odict
279
+ break
280
+ if callback:
281
+ callback(filename, method, options)
282
+ for message_tuple in extract_from_file(
283
+ method, filepath,
284
+ keywords=keywords,
285
+ comment_tags=comment_tags,
286
+ options=options,
287
+ strip_comment_tags=strip_comment_tags,
288
+ ):
289
+ yield (filename, *message_tuple)
290
+
291
+ break
292
+
293
+
294
+ def extract_from_file(
295
+ method: _ExtractionMethod,
296
+ filename: str | os.PathLike[str],
297
+ keywords: Mapping[str, _Keyword] = DEFAULT_KEYWORDS,
298
+ comment_tags: Collection[str] = (),
299
+ options: Mapping[str, Any] | None = None,
300
+ strip_comment_tags: bool = False,
301
+ ) -> list[_ExtractionResult]:
302
+ """Extract messages from a specific file.
303
+
304
+ This function returns a list of tuples of the form ``(lineno, message, comments, context)``.
305
+
306
+ :param filename: the path to the file to extract messages from
307
+ :param method: a string specifying the extraction method (e.g. "python")
308
+ :param keywords: a dictionary mapping keywords (i.e. names of functions
309
+ that should be recognized as translation functions) to
310
+ tuples that specify which of their arguments contain
311
+ localizable strings
312
+ :param comment_tags: a list of translator tags to search for and include
313
+ in the results
314
+ :param strip_comment_tags: a flag that if set to `True` causes all comment
315
+ tags to be removed from the collected comments.
316
+ :param options: a dictionary of additional options (optional)
317
+ :returns: list of tuples of the form ``(lineno, message, comments, context)``
318
+ :rtype: list[tuple[int, str|tuple[str], list[str], str|None]]
319
+ """
320
+ if method == 'ignore':
321
+ return []
322
+
323
+ with open(filename, 'rb') as fileobj:
324
+ return list(extract(method, fileobj, keywords, comment_tags,
325
+ options, strip_comment_tags))
326
+
327
+
328
+ def _match_messages_against_spec(
329
+ lineno: int,
330
+ messages: list[str | None],
331
+ comments: list[str],
332
+ fileobj: _FileObj,
333
+ spec: tuple[int | tuple[int, str], ...],
334
+ ):
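+ # Apply a single keyword spec to the argument list of one call: pick the
+ # context argument (an ``(index, 'c')`` entry), collect the translatable
+ # arguments, warn about an empty msgid, and return None when the spec
+ # cannot be satisfied.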
335
+ translatable = []
336
+ context = None
337
+
338
+ # last_index is 1 based like the keyword spec
339
+ last_index = len(messages)
340
+ for index in spec:
341
+ if isinstance(index, tuple): # (n, 'c')
342
+ context = messages[index[0] - 1]
343
+ continue
344
+ if last_index < index:
345
+ # Not enough arguments
346
+ return
347
+ message = messages[index - 1]
348
+ if message is None:
349
+ return
350
+ translatable.append(message)
351
+
352
+ # keyword spec indexes are 1 based, therefore '-1'
353
+ if isinstance(spec[0], tuple):
354
+ # context-aware *gettext method
355
+ first_msg_index = spec[1] - 1
356
+ else:
357
+ first_msg_index = spec[0] - 1
358
+ # An empty string msgid isn't valid, emit a warning
359
+ if not messages[first_msg_index]:
360
+ filename = (getattr(fileobj, "name", None) or "(unknown)")
361
+ sys.stderr.write(
362
+ f"{filename}:{lineno}: warning: Empty msgid. It is reserved by GNU gettext: gettext(\"\") "
363
+ f"returns the header entry with meta information, not the empty string.\n",
364
+ )
365
+ return
366
+
367
+ translatable = tuple(translatable)
368
+ if len(translatable) == 1:
369
+ translatable = translatable[0]
370
+
371
+ return lineno, translatable, comments, context
372
+
373
+
374
+ @lru_cache(maxsize=None)
375
+ def _find_extractor(name: str):
376
+ for ep_name, load in find_entrypoints(GROUP_NAME):
377
+ if ep_name == name:
378
+ return load()
379
+ return None
380
+
381
+
382
+ def extract(
383
+ method: _ExtractionMethod,
384
+ fileobj: _FileObj,
385
+ keywords: Mapping[str, _Keyword] = DEFAULT_KEYWORDS,
386
+ comment_tags: Collection[str] = (),
387
+ options: Mapping[str, Any] | None = None,
388
+ strip_comment_tags: bool = False,
389
+ ) -> Generator[_ExtractionResult, None, None]:
390
+ """Extract messages from the given file-like object using the specified
391
+ extraction method.
392
+
393
+ This function returns tuples of the form ``(lineno, message, comments, context)``.
394
+
395
+ The implementation dispatches the actual extraction to plugins, based on the
396
+ value of the ``method`` parameter.
397
+
398
+ >>> source = b'''# foo module
399
+ ... def run(argv):
400
+ ... print(_('Hello, world!'))
401
+ ... '''
402
+
403
+ >>> from io import BytesIO
404
+ >>> for message in extract('python', BytesIO(source)):
405
+ ... print(message)
406
+ (3, u'Hello, world!', [], None)
407
+
408
+ :param method: an extraction method (a callable), or
409
+ a string specifying the extraction method (e.g. "python");
410
+ if this is a simple name, the extraction function will be
411
+ looked up by entry point; if it is an explicit reference
412
+ to a function (of the form ``package.module:funcname`` or
413
+ ``package.module.funcname``), the corresponding function
414
+ will be imported and used
415
+ :param fileobj: the file-like object the messages should be extracted from
416
+ :param keywords: a dictionary mapping keywords (i.e. names of functions
417
+ that should be recognized as translation functions) to
418
+ tuples that specify which of their arguments contain
419
+ localizable strings
420
+ :param comment_tags: a list of translator tags to search for and include
421
+ in the results
422
+ :param options: a dictionary of additional options (optional)
423
+ :param strip_comment_tags: a flag that if set to `True` causes all comment
424
+ tags to be removed from the collected comments.
425
+ :raise ValueError: if the extraction method is not registered
426
+ :returns: iterable of tuples of the form ``(lineno, message, comments, context)``
427
+ :rtype: Iterable[tuple[int, str|tuple[str], list[str], str|None]]
428
+ """
429
+ if callable(method):
430
+ func = method
431
+ elif ':' in method or '.' in method:
432
+ if ':' not in method:
433
+ lastdot = method.rfind('.')
434
+ module, attrname = method[:lastdot], method[lastdot + 1:]
435
+ else:
436
+ module, attrname = method.split(':', 1)
437
+ func = getattr(__import__(module, {}, {}, [attrname]), attrname)
438
+ else:
439
+ func = _find_extractor(method)
440
+ if func is None:
441
+ # if no named entry point was found,
442
+ # we resort to looking up a builtin extractor
443
+ func = _BUILTIN_EXTRACTORS.get(method)
444
+
445
+ if func is None:
446
+ raise ValueError(f"Unknown extraction method {method!r}")
447
+
448
+ results = func(fileobj, keywords.keys(), comment_tags,
449
+ options=options or {})
450
+
451
+ for lineno, funcname, messages, comments in results:
452
+ if not isinstance(messages, (list, tuple)):
453
+ messages = [messages]
454
+ if not messages:
455
+ continue
456
+
457
+ specs = keywords[funcname] or None if funcname else None
458
+ # {None: x} may be collapsed into x for backwards compatibility.
459
+ if not isinstance(specs, dict):
460
+ specs = {None: specs}
461
+
462
+ if strip_comment_tags:
463
+ _strip_comment_tags(comments, comment_tags)
464
+
465
+ # None matches all arities.
466
+ for arity in (None, len(messages)):
467
+ try:
468
+ spec = specs[arity]
469
+ except KeyError:
470
+ continue
471
+ if spec is None:
472
+ spec = (1,)
473
+ result = _match_messages_against_spec(lineno, messages, comments, fileobj, spec)
474
+ if result is not None:
475
+ yield result
476
+
477
+
478
+ def extract_nothing(
479
+ fileobj: _FileObj,
480
+ keywords: Mapping[str, _Keyword],
481
+ comment_tags: Collection[str],
482
+ options: Mapping[str, Any],
483
+ ) -> list[_ExtractionResult]:
484
+ """Pseudo extractor that does not actually extract anything, but simply
485
+ returns an empty list.
486
+ """
487
+ return []
488
+
489
+
490
+ def extract_python(
491
+ fileobj: IO[bytes],
492
+ keywords: Mapping[str, _Keyword],
493
+ comment_tags: Collection[str],
494
+ options: _PyOptions,
495
+ ) -> Generator[_ExtractionResult, None, None]:
496
+ """Extract messages from Python source code.
497
+
498
+ It returns an iterator yielding tuples in the following form ``(lineno,
499
+ funcname, message, comments)``.
500
+
501
+ :param fileobj: the seekable, file-like object the messages should be
502
+ extracted from
503
+ :param keywords: a list of keywords (i.e. function names) that should be
504
+ recognized as translation functions
505
+ :param comment_tags: a list of translator tags to search for and include
506
+ in the results
507
+ :param options: a dictionary of additional options (optional)
508
+ :rtype: ``iterator``
509
+ """
510
+ funcname = lineno = message_lineno = None
511
+ call_stack = -1
512
+ buf = []
513
+ messages = []
514
+ translator_comments = []
515
+ in_def = in_translator_comments = False
516
+ comment_tag = None
517
+
518
+ encoding = parse_encoding(fileobj) or options.get('encoding', 'UTF-8')
519
+ future_flags = parse_future_flags(fileobj, encoding)
520
+ next_line = lambda: fileobj.readline().decode(encoding)
521
+
522
+ tokens = generate_tokens(next_line)
523
+
524
+ # Current prefix of a Python 3.12 (PEP 701) f-string, or None if we're not
525
+ # currently parsing one.
526
+ current_fstring_start = None
527
+
528
+ for tok, value, (lineno, _), _, _ in tokens:
529
+ if call_stack == -1 and tok == NAME and value in ('def', 'class'):
530
+ in_def = True
531
+ elif tok == OP and value == '(':
532
+ if in_def:
533
+ # Avoid false positives for declarations such as:
534
+ # def gettext(arg='message'):
535
+ in_def = False
536
+ continue
537
+ if funcname:
538
+ call_stack += 1
539
+ elif in_def and tok == OP and value == ':':
540
+ # End of a class definition without parens
541
+ in_def = False
542
+ continue
543
+ elif call_stack == -1 and tok == COMMENT:
544
+ # Strip the comment token from the line
545
+ value = value[1:].strip()
546
+ if in_translator_comments and \
547
+ translator_comments[-1][0] == lineno - 1:
548
+ # We're already inside a translator comment, continue appending
549
+ translator_comments.append((lineno, value))
550
+ continue
551
+ # If execution reaches this point, let's see if comment line
552
+ # starts with one of the comment tags
553
+ for comment_tag in comment_tags:
554
+ if value.startswith(comment_tag):
555
+ in_translator_comments = True
556
+ translator_comments.append((lineno, value))
557
+ break
558
+ elif funcname and call_stack == 0:
559
+ nested = (tok == NAME and value in keywords)
560
+ if (tok == OP and value == ')') or nested:
561
+ if buf:
562
+ messages.append(''.join(buf))
563
+ del buf[:]
564
+ else:
565
+ messages.append(None)
566
+
567
+ messages = tuple(messages) if len(messages) > 1 else messages[0]
568
+ # Comments don't apply unless they immediately
569
+ # precede the message
570
+ if translator_comments and \
571
+ translator_comments[-1][0] < message_lineno - 1:
572
+ translator_comments = []
573
+
574
+ yield (message_lineno, funcname, messages,
575
+ [comment[1] for comment in translator_comments])
576
+
577
+ funcname = lineno = message_lineno = None
578
+ call_stack = -1
579
+ messages = []
580
+ translator_comments = []
581
+ in_translator_comments = False
582
+ if nested:
583
+ funcname = value
584
+ elif tok == STRING:
585
+ val = _parse_python_string(value, encoding, future_flags)
586
+ if val is not None:
587
+ if not message_lineno:
588
+ message_lineno = lineno
589
+ buf.append(val)
590
+
591
+ # Python 3.12+, see https://peps.python.org/pep-0701/#new-tokens
592
+ elif tok == FSTRING_START:
593
+ current_fstring_start = value
594
+ if not message_lineno:
595
+ message_lineno = lineno
596
+ elif tok == FSTRING_MIDDLE:
597
+ if current_fstring_start is not None:
598
+ current_fstring_start += value
599
+ elif tok == FSTRING_END:
600
+ if current_fstring_start is not None:
601
+ fstring = current_fstring_start + value
602
+ val = _parse_python_string(fstring, encoding, future_flags)
603
+ if val is not None:
604
+ buf.append(val)
605
+
606
+ elif tok == OP and value == ',':
607
+ if buf:
608
+ messages.append(''.join(buf))
609
+ del buf[:]
610
+ else:
611
+ messages.append(None)
612
+ if translator_comments:
613
+ # We have translator comments, and since we're on a
614
+ # comma(,) user is allowed to break into a new line
615
+ # Let's increase the last comment's lineno in order
616
+ # for the comment to still be a valid one
617
+ old_lineno, old_comment = translator_comments.pop()
618
+ translator_comments.append((old_lineno + 1, old_comment))
619
+
620
+ elif tok != NL and not message_lineno:
621
+ message_lineno = lineno
622
+ elif call_stack > 0 and tok == OP and value == ')':
623
+ call_stack -= 1
624
+ elif funcname and call_stack == -1:
625
+ funcname = None
626
+ elif tok == NAME and value in keywords:
627
+ funcname = value
628
+
629
+ if current_fstring_start is not None and tok not in {FSTRING_START, FSTRING_MIDDLE}:
630
+ # In Python 3.12, tokens other than FSTRING_* mean the
631
+ # f-string is dynamic, so we don't want to extract it.
632
+ # And if it's FSTRING_END, we've already handled it above.
633
+ # Let's forget that we're in an f-string.
634
+ current_fstring_start = None
635
+
636
+
637
+ def _parse_python_string(value: str, encoding: str, future_flags: int) -> str | None:
638
+ # Unwrap quotes in a safe manner, maintaining the string's encoding
639
+ # https://sourceforge.net/tracker/?func=detail&atid=355470&aid=617979&group_id=5470
640
+ code = compile(
641
+ f'# coding={str(encoding)}\n{value}',
642
+ '<string>',
643
+ 'eval',
644
+ ast.PyCF_ONLY_AST | future_flags,
645
+ )
646
+ if isinstance(code, ast.Expression):
647
+ body = code.body
648
+ if isinstance(body, ast.Constant):
649
+ return body.value
650
+ if isinstance(body, ast.JoinedStr): # f-string
651
+ if all(isinstance(node, ast.Constant) for node in body.values):
652
+ return ''.join(node.value for node in body.values)
653
+ # TODO: we could raise an error or warning when not all nodes are constants
654
+ return None
655
+
656
+
657
+ def extract_javascript(
658
+ fileobj: _FileObj,
659
+ keywords: Mapping[str, _Keyword],
660
+ comment_tags: Collection[str],
661
+ options: _JSOptions,
662
+ lineno: int = 1,
663
+ ) -> Generator[_ExtractionResult, None, None]:
664
+ """Extract messages from JavaScript source code.
665
+
666
+ :param fileobj: the seekable, file-like object the messages should be
667
+ extracted from
668
+ :param keywords: a list of keywords (i.e. function names) that should be
669
+ recognized as translation functions
670
+ :param comment_tags: a list of translator tags to search for and include
671
+ in the results
672
+ :param options: a dictionary of additional options (optional)
673
+ Supported options are:
674
+ * `jsx` -- set to false to disable JSX/E4X support.
675
+ * `template_string` -- if `True`, supports gettext(`key`)
676
+ * `parse_template_string` -- if `True`, will parse the
677
+ contents of JavaScript
678
+ template strings.
679
+ :param lineno: line number offset (for parsing embedded fragments)
680
+ """
681
+ from babel.messages.jslexer import Token, tokenize, unquote_string
682
+ funcname = message_lineno = None
683
+ messages = []
684
+ last_argument = None
685
+ translator_comments = []
686
+ concatenate_next = False
687
+ encoding = options.get('encoding', 'utf-8')
688
+ last_token = None
689
+ call_stack = -1
690
+ dotted = any('.' in kw for kw in keywords)
691
+ for token in tokenize(
692
+ fileobj.read().decode(encoding),
693
+ jsx=options.get("jsx", True),
694
+ template_string=options.get("template_string", True),
695
+ dotted=dotted,
696
+ lineno=lineno,
697
+ ):
698
+ if ( # Turn keyword`foo` expressions into keyword("foo") calls:
699
+ funcname and # have a keyword...
700
+ (last_token and last_token.type == 'name') and # we've seen nothing after the keyword...
701
+ token.type == 'template_string' # this is a template string
702
+ ):
703
+ message_lineno = token.lineno
704
+ messages = [unquote_string(token.value)]
705
+ call_stack = 0
706
+ token = Token('operator', ')', token.lineno)
707
+
708
+ if options.get('parse_template_string') and not funcname and token.type == 'template_string':
709
+ yield from parse_template_string(token.value, keywords, comment_tags, options, token.lineno)
710
+
711
+ elif token.type == 'operator' and token.value == '(':
712
+ if funcname:
713
+ message_lineno = token.lineno
714
+ call_stack += 1
715
+
716
+ elif call_stack == -1 and token.type == 'linecomment':
717
+ value = token.value[2:].strip()
718
+ if translator_comments and \
719
+ translator_comments[-1][0] == token.lineno - 1:
720
+ translator_comments.append((token.lineno, value))
721
+ continue
722
+
723
+ for comment_tag in comment_tags:
724
+ if value.startswith(comment_tag):
725
+ translator_comments.append((token.lineno, value.strip()))
726
+ break
727
+
728
+ elif token.type == 'multilinecomment':
729
+ # only one multi-line comment may precede a translation
730
+ translator_comments = []
731
+ value = token.value[2:-2].strip()
732
+ for comment_tag in comment_tags:
733
+ if value.startswith(comment_tag):
734
+ lines = value.splitlines()
735
+ if lines:
736
+ lines[0] = lines[0].strip()
737
+ lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
738
+ for offset, line in enumerate(lines):
739
+ translator_comments.append((token.lineno + offset,
740
+ line))
741
+ break
742
+
743
+ elif funcname and call_stack == 0:
744
+ if token.type == 'operator' and token.value == ')':
745
+ if last_argument is not None:
746
+ messages.append(last_argument)
747
+ if len(messages) > 1:
748
+ messages = tuple(messages)
749
+ elif messages:
750
+ messages = messages[0]
751
+ else:
752
+ messages = None
753
+
754
+ # Comments don't apply unless they immediately precede the
755
+ # message
756
+ if translator_comments and \
757
+ translator_comments[-1][0] < message_lineno - 1:
758
+ translator_comments = []
759
+
760
+ if messages is not None:
761
+ yield (message_lineno, funcname, messages,
762
+ [comment[1] for comment in translator_comments])
763
+
764
+ funcname = message_lineno = last_argument = None
765
+ concatenate_next = False
766
+ translator_comments = []
767
+ messages = []
768
+ call_stack = -1
769
+
770
+ elif token.type in ('string', 'template_string'):
771
+ new_value = unquote_string(token.value)
772
+ if concatenate_next:
773
+ last_argument = (last_argument or '') + new_value
774
+ concatenate_next = False
775
+ else:
776
+ last_argument = new_value
777
+
778
+ elif token.type == 'operator':
779
+ if token.value == ',':
780
+ if last_argument is not None:
781
+ messages.append(last_argument)
782
+ last_argument = None
783
+ else:
784
+ messages.append(None)
785
+ concatenate_next = False
786
+ elif token.value == '+':
787
+ concatenate_next = True
788
+
789
+ elif call_stack > 0 and token.type == 'operator' \
790
+ and token.value == ')':
791
+ call_stack -= 1
792
+
793
+ elif funcname and call_stack == -1:
794
+ funcname = None
795
+
796
+ elif call_stack == -1 and token.type == 'name' and \
797
+ token.value in keywords and \
798
+ (last_token is None or last_token.type != 'name' or
799
+ last_token.value != 'function'):
800
+ funcname = token.value
801
+
802
+ last_token = token
803
+
804
+
805
+ def parse_template_string(
806
+ template_string: str,
807
+ keywords: Mapping[str, _Keyword],
808
+ comment_tags: Collection[str],
809
+ options: _JSOptions,
810
+ lineno: int = 1,
811
+ ) -> Generator[_ExtractionResult, None, None]:
812
+ """Parse JavaScript template string.
813
+
814
+ :param template_string: the template string to be parsed
815
+ :param keywords: a list of keywords (i.e. function names) that should be
816
+ recognized as translation functions
817
+ :param comment_tags: a list of translator tags to search for and include
818
+ in the results
819
+ :param options: a dictionary of additional options (optional)
820
+ :param lineno: starting line number (optional)
821
+ """
822
+ from babel.messages.jslexer import line_re
823
+ prev_character = None
824
+ level = 0
825
+ inside_str = False
826
+ expression_contents = ''
827
+ for character in template_string[1:-1]:
828
+ if not inside_str and character in ('"', "'", '`'):
829
+ inside_str = character
830
+ elif inside_str == character and prev_character != r'\\':
831
+ inside_str = False
832
+ if level:
833
+ expression_contents += character
834
+ if not inside_str:
835
+ if character == '{' and prev_character == '$':
836
+ level += 1
837
+ elif level and character == '}':
838
+ level -= 1
839
+ if level == 0 and expression_contents:
840
+ expression_contents = expression_contents[0:-1]
841
+ fake_file_obj = io.BytesIO(expression_contents.encode())
842
+ yield from extract_javascript(fake_file_obj, keywords, comment_tags, options, lineno)
843
+ lineno += len(line_re.findall(expression_contents))
844
+ expression_contents = ''
845
+ prev_character = character
846
+
847
+
848
+ _BUILTIN_EXTRACTORS = {
849
+ 'ignore': extract_nothing,
850
+ 'python': extract_python,
851
+ 'javascript': extract_javascript,
852
+ }
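+
+ # Example usage (illustrative sketch): walk a source tree with the default
+ # mapping and print each extracted message ('myproject' is a placeholder
+ # path, not a real directory):
+ #
+ #     from babel.messages.extract import extract_from_dir
+ #
+ #     for filename, lineno, message, comments, context in extract_from_dir('myproject'):
+ #         print(f"{filename}:{lineno}: {message!r}")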
lib/python3.10/site-packages/babel/messages/frontend.py ADDED
@@ -0,0 +1,1202 @@
1
+ """
2
+ babel.messages.frontend
3
+ ~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Frontends for the message extraction functionality.
6
+
7
+ :copyright: (c) 2013-2025 by the Babel Team.
8
+ :license: BSD, see LICENSE for more details.
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ import datetime
14
+ import fnmatch
15
+ import logging
16
+ import optparse
17
+ import os
18
+ import re
19
+ import shutil
20
+ import sys
21
+ import tempfile
22
+ import warnings
23
+ from configparser import RawConfigParser
24
+ from io import StringIO
25
+ from typing import BinaryIO, Iterable, Literal
26
+
27
+ from babel import Locale, localedata
28
+ from babel import __version__ as VERSION
29
+ from babel.core import UnknownLocaleError
30
+ from babel.messages.catalog import DEFAULT_HEADER, Catalog
31
+ from babel.messages.extract import (
32
+ DEFAULT_KEYWORDS,
33
+ DEFAULT_MAPPING,
34
+ check_and_call_extract_file,
35
+ extract_from_dir,
36
+ )
37
+ from babel.messages.mofile import write_mo
38
+ from babel.messages.pofile import read_po, write_po
39
+ from babel.util import LOCALTZ
40
+
41
+ log = logging.getLogger('babel')
42
+
43
+
44
+ class BaseError(Exception):
45
+ pass
46
+
47
+
48
+ class OptionError(BaseError):
49
+ pass
50
+
51
+
52
+ class SetupError(BaseError):
53
+ pass
54
+
55
+
56
+ class ConfigurationError(BaseError):
57
+ """
58
+ Raised for errors in configuration files.
59
+ """
60
+
61
+
62
+ def listify_value(arg, split=None):
63
+ """
64
+ Make a list out of an argument.
65
+
66
+ Values from `distutils` argument parsing are always single strings;
67
+ values from `optparse` parsing may be lists of strings that may need
68
+ to be further split.
69
+
70
+ No matter the input, this function returns a flat list of whitespace-trimmed
71
+ strings, with `None` values filtered out.
72
+
73
+ >>> listify_value("foo bar")
74
+ ['foo', 'bar']
75
+ >>> listify_value(["foo bar"])
76
+ ['foo', 'bar']
77
+ >>> listify_value([["foo"], "bar"])
78
+ ['foo', 'bar']
79
+ >>> listify_value([["foo"], ["bar", None, "foo"]])
80
+ ['foo', 'bar', 'foo']
81
+ >>> listify_value("foo, bar, quux", ",")
82
+ ['foo', 'bar', 'quux']
83
+
84
+ :param arg: A string or a list of strings
85
+ :param split: The argument to pass to `str.split()`.
86
+ :return:
87
+ """
88
+ out = []
89
+
90
+ if not isinstance(arg, (list, tuple)):
91
+ arg = [arg]
92
+
93
+ for val in arg:
94
+ if val is None:
95
+ continue
96
+ if isinstance(val, (list, tuple)):
97
+ out.extend(listify_value(val, split=split))
98
+ continue
99
+ out.extend(s.strip() for s in str(val).split(split))
100
+ assert all(isinstance(val, str) for val in out)
101
+ return out
102
+
103
+
104
+ class CommandMixin:
105
+ # This class is a small shim between Distutils commands and
106
+ # optparse option parsing in the frontend command line.
107
+
108
+ #: Option name to be input as `args` on the script command line.
109
+ as_args = None
110
+
111
+ #: Options which allow multiple values.
112
+ #: This is used by the `optparse` transmogrification code.
113
+ multiple_value_options = ()
114
+
115
+ #: Options which are booleans.
116
+ #: This is used by the `optparse` transmogrification code.
117
+ # (This is actually used by distutils code too, but is never
118
+ # declared in the base class.)
119
+ boolean_options = ()
120
+
121
+ #: Option aliases, to retain standalone command compatibility.
122
+ #: Distutils does not support option aliases, but optparse does.
123
+ #: This maps the distutils argument name to an iterable of aliases
124
+ #: that are usable with optparse.
125
+ option_aliases = {}
126
+
127
+ #: Choices for options that need to be restricted to a specific
128
+ #: list of choices.
129
+ option_choices = {}
130
+
131
+ #: Log object. To allow replacement in the script command line runner.
132
+ log = log
133
+
134
+ def __init__(self, dist=None):
135
+ # A less strict version of distutils' `__init__`.
136
+ self.distribution = dist
137
+ self.initialize_options()
138
+ self._dry_run = None
139
+ self.verbose = False
140
+ self.force = None
141
+ self.help = 0
142
+ self.finalized = 0
143
+
144
+ def initialize_options(self):
145
+ pass
146
+
147
+ def ensure_finalized(self):
148
+ if not self.finalized:
149
+ self.finalize_options()
150
+ self.finalized = 1
151
+
152
+ def finalize_options(self):
153
+ raise RuntimeError(
154
+ f"abstract method -- subclass {self.__class__} must override",
155
+ )
156
+
157
+
158
+ class CompileCatalog(CommandMixin):
159
+ description = 'compile message catalogs to binary MO files'
160
+ user_options = [
161
+ ('domain=', 'D',
162
+ "domains of PO files (space separated list, default 'messages')"),
163
+ ('directory=', 'd',
164
+ 'path to base directory containing the catalogs'),
165
+ ('input-file=', 'i',
166
+ 'name of the input file'),
167
+ ('output-file=', 'o',
168
+ "name of the output file (default "
169
+ "'<output_dir>/<locale>/LC_MESSAGES/<domain>.mo')"),
170
+ ('locale=', 'l',
171
+ 'locale of the catalog to compile'),
172
+ ('use-fuzzy', 'f',
173
+ 'also include fuzzy translations'),
174
+ ('statistics', None,
175
+ 'print statistics about translations'),
176
+ ]
177
+ boolean_options = ['use-fuzzy', 'statistics']
178
+
179
+ def initialize_options(self):
180
+ self.domain = 'messages'
181
+ self.directory = None
182
+ self.input_file = None
183
+ self.output_file = None
184
+ self.locale = None
185
+ self.use_fuzzy = False
186
+ self.statistics = False
187
+
188
+ def finalize_options(self):
189
+ self.domain = listify_value(self.domain)
190
+ if not self.input_file and not self.directory:
191
+ raise OptionError('you must specify either the input file or the base directory')
192
+ if not self.output_file and not self.directory:
193
+ raise OptionError('you must specify either the output file or the base directory')
194
+
195
+ def run(self):
196
+ n_errors = 0
197
+ for domain in self.domain:
198
+ for errors in self._run_domain(domain).values():
199
+ n_errors += len(errors)
200
+ if n_errors:
201
+ self.log.error('%d errors encountered.', n_errors)
202
+ return (1 if n_errors else 0)
203
+
204
+ def _run_domain(self, domain):
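+ # Collect (locale, po_file) pairs and the matching .mo targets -- either from
+ # an explicit input/output file or by scanning
+ # <directory>/<locale>/LC_MESSAGES/<domain>.po -- then check and compile each
+ # catalog, returning a mapping of catalog -> list of check() errors.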
205
+ po_files = []
206
+ mo_files = []
207
+
208
+ if not self.input_file:
209
+ if self.locale:
210
+ po_files.append((self.locale,
211
+ os.path.join(self.directory, self.locale,
212
+ 'LC_MESSAGES',
213
+ f"{domain}.po")))
214
+ mo_files.append(os.path.join(self.directory, self.locale,
215
+ 'LC_MESSAGES',
216
+ f"{domain}.mo"))
217
+ else:
218
+ for locale in os.listdir(self.directory):
219
+ po_file = os.path.join(self.directory, locale,
220
+ 'LC_MESSAGES', f"{domain}.po")
221
+ if os.path.exists(po_file):
222
+ po_files.append((locale, po_file))
223
+ mo_files.append(os.path.join(self.directory, locale,
224
+ 'LC_MESSAGES',
225
+ f"{domain}.mo"))
226
+ else:
227
+ po_files.append((self.locale, self.input_file))
228
+ if self.output_file:
229
+ mo_files.append(self.output_file)
230
+ else:
231
+ mo_files.append(os.path.join(self.directory, self.locale,
232
+ 'LC_MESSAGES',
233
+ f"{domain}.mo"))
234
+
235
+ if not po_files:
236
+ raise OptionError('no message catalogs found')
237
+
238
+ catalogs_and_errors = {}
239
+
240
+ for idx, (locale, po_file) in enumerate(po_files):
241
+ mo_file = mo_files[idx]
242
+ with open(po_file, 'rb') as infile:
243
+ catalog = read_po(infile, locale)
244
+
245
+ if self.statistics:
246
+ translated = 0
247
+ for message in list(catalog)[1:]:
248
+ if message.string:
249
+ translated += 1
250
+ percentage = 0
251
+ if len(catalog):
252
+ percentage = translated * 100 // len(catalog)
253
+ self.log.info(
254
+ '%d of %d messages (%d%%) translated in %s',
255
+ translated, len(catalog), percentage, po_file,
256
+ )
257
+
258
+ if catalog.fuzzy and not self.use_fuzzy:
259
+ self.log.info('catalog %s is marked as fuzzy, skipping', po_file)
260
+ continue
261
+
262
+ catalogs_and_errors[catalog] = catalog_errors = list(catalog.check())
263
+ for message, errors in catalog_errors:
264
+ for error in errors:
265
+ self.log.error(
266
+ 'error: %s:%d: %s', po_file, message.lineno, error,
267
+ )
268
+
269
+ self.log.info('compiling catalog %s to %s', po_file, mo_file)
270
+
271
+ with open(mo_file, 'wb') as outfile:
272
+ write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
273
+
274
+ return catalogs_and_errors
275
+
276
+
277
+ def _make_directory_filter(ignore_patterns):
278
+ """
279
+ Build a directory_filter function based on a list of ignore patterns.
280
+ """
281
+
282
+ def cli_directory_filter(dirname):
283
+ basename = os.path.basename(dirname)
284
+ return not any(
285
+ fnmatch.fnmatch(basename, ignore_pattern)
286
+ for ignore_pattern
287
+ in ignore_patterns
288
+ )
289
+
290
+ return cli_directory_filter
291
+
292
+
293
+ class ExtractMessages(CommandMixin):
294
+ description = 'extract localizable strings from the project code'
295
+ user_options = [
296
+ ('charset=', None,
297
+ 'charset to use in the output file (default "utf-8")'),
298
+ ('keywords=', 'k',
299
+ 'space-separated list of keywords to look for in addition to the '
300
+ 'defaults (may be repeated multiple times)'),
301
+ ('no-default-keywords', None,
302
+ 'do not include the default keywords'),
303
+ ('mapping-file=', 'F',
304
+ 'path to the mapping configuration file'),
305
+ ('no-location', None,
306
+ 'do not include location comments with filename and line number'),
307
+ ('add-location=', None,
308
+ 'location lines format. If it is not given or "full", it generates '
309
+ 'the lines with both file name and line number. If it is "file", '
310
+ 'the line number part is omitted. If it is "never", it completely '
311
+ 'suppresses the lines (same as --no-location).'),
312
+ ('omit-header', None,
313
+ 'do not include msgid "" entry in header'),
314
+ ('output-file=', 'o',
315
+ 'name of the output file'),
316
+ ('width=', 'w',
317
+ 'set output line width (default 76)'),
318
+ ('no-wrap', None,
319
+ 'do not break long message lines, longer than the output line width, '
320
+ 'into several lines'),
321
+ ('sort-output', None,
322
+ 'generate sorted output (default False)'),
323
+ ('sort-by-file', None,
324
+ 'sort output by file location (default False)'),
325
+ ('msgid-bugs-address=', None,
326
+ 'set report address for msgid'),
327
+ ('copyright-holder=', None,
328
+ 'set copyright holder in output'),
329
+ ('project=', None,
330
+ 'set project name in output'),
331
+ ('version=', None,
332
+ 'set project version in output'),
333
+ ('add-comments=', 'c',
334
+ 'place comment block with TAG (or those preceding keyword lines) in '
335
+ 'output file. Separate multiple TAGs with commas(,)'), # TODO: Support repetition of this argument
336
+ ('strip-comments', 's',
337
+ 'strip the comment TAGs from the comments.'),
338
+ ('input-paths=', None,
339
+ 'files or directories that should be scanned for messages. Separate multiple '
340
+ 'files or directories with commas(,)'), # TODO: Support repetition of this argument
341
+ ('input-dirs=', None, # TODO (3.x): Remove me.
342
+ 'alias for input-paths (does allow files as well as directories).'),
343
+ ('ignore-dirs=', None,
344
+ 'Patterns for directories to ignore when scanning for messages. '
345
+ 'Separate multiple patterns with spaces (default ".* ._")'),
346
+ ('header-comment=', None,
347
+ 'header comment for the catalog'),
348
+ ('last-translator=', None,
349
+ 'set the name and email of the last translator in output'),
350
+ ]
351
+ boolean_options = [
352
+ 'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
353
+ 'sort-output', 'sort-by-file', 'strip-comments',
354
+ ]
355
+ as_args = 'input-paths'
356
+ multiple_value_options = (
357
+ 'add-comments',
358
+ 'keywords',
359
+ 'ignore-dirs',
360
+ )
361
+ option_aliases = {
362
+ 'keywords': ('--keyword',),
363
+ 'mapping-file': ('--mapping',),
364
+ 'output-file': ('--output',),
365
+ 'strip-comments': ('--strip-comment-tags',),
366
+ 'last-translator': ('--last-translator',),
367
+ }
368
+ option_choices = {
369
+ 'add-location': ('full', 'file', 'never'),
370
+ }
371
+
372
+ def initialize_options(self):
373
+ self.charset = 'utf-8'
374
+ self.keywords = None
375
+ self.no_default_keywords = False
376
+ self.mapping_file = None
377
+ self.no_location = False
378
+ self.add_location = None
379
+ self.omit_header = False
380
+ self.output_file = None
381
+ self.input_dirs = None
382
+ self.input_paths = None
383
+ self.width = None
384
+ self.no_wrap = False
385
+ self.sort_output = False
386
+ self.sort_by_file = False
387
+ self.msgid_bugs_address = None
388
+ self.copyright_holder = None
389
+ self.project = None
390
+ self.version = None
391
+ self.add_comments = None
392
+ self.strip_comments = False
393
+ self.include_lineno = True
394
+ self.ignore_dirs = None
395
+ self.header_comment = None
396
+ self.last_translator = None
397
+
398
+ def finalize_options(self):
399
+ if self.input_dirs:
400
+ if not self.input_paths:
401
+ self.input_paths = self.input_dirs
402
+ else:
403
+ raise OptionError(
404
+ 'input-dirs and input-paths are mutually exclusive',
405
+ )
406
+
407
+ keywords = {} if self.no_default_keywords else DEFAULT_KEYWORDS.copy()
408
+
409
+ keywords.update(parse_keywords(listify_value(self.keywords)))
410
+
411
+ self.keywords = keywords
412
+
413
+ if not self.keywords:
414
+ raise OptionError(
415
+ 'you must specify new keywords if you disable the default ones',
416
+ )
417
+
418
+ if not self.output_file:
419
+ raise OptionError('no output file specified')
420
+ if self.no_wrap and self.width:
421
+ raise OptionError(
422
+ "'--no-wrap' and '--width' are mutually exclusive",
423
+ )
424
+ if not self.no_wrap and not self.width:
425
+ self.width = 76
426
+ elif self.width is not None:
427
+ self.width = int(self.width)
428
+
429
+ if self.sort_output and self.sort_by_file:
430
+ raise OptionError(
431
+ "'--sort-output' and '--sort-by-file' are mutually exclusive",
432
+ )
433
+
434
+ if self.input_paths:
435
+ if isinstance(self.input_paths, str):
436
+ self.input_paths = re.split(r',\s*', self.input_paths)
437
+ elif self.distribution is not None:
438
+ self.input_paths = dict.fromkeys([
439
+ k.split('.', 1)[0]
440
+ for k in (self.distribution.packages or ())
441
+ ]).keys()
442
+ else:
443
+ self.input_paths = []
444
+
445
+ if not self.input_paths:
446
+ raise OptionError("no input files or directories specified")
447
+
448
+ for path in self.input_paths:
449
+ if not os.path.exists(path):
450
+ raise OptionError(f"Input path: {path} does not exist")
451
+
452
+ self.add_comments = listify_value(self.add_comments or (), ",")
453
+
454
+ if self.distribution:
455
+ if not self.project:
456
+ self.project = self.distribution.get_name()
457
+ if not self.version:
458
+ self.version = self.distribution.get_version()
459
+
460
+ if self.add_location == 'never':
461
+ self.no_location = True
462
+ elif self.add_location == 'file':
463
+ self.include_lineno = False
464
+
465
+ ignore_dirs = listify_value(self.ignore_dirs)
466
+ if ignore_dirs:
467
+ self.directory_filter = _make_directory_filter(ignore_dirs)
468
+ else:
469
+ self.directory_filter = None
470
+
471
+ def _build_callback(self, path: str):
472
+ def callback(filename: str, method: str, options: dict):
473
+ if method == 'ignore':
474
+ return
475
+
476
+ # If we explicitly provide a full filepath, just use that.
477
+ # Otherwise, path will be the directory path and filename
478
+ # is the relative path from that dir to the file.
479
+ # So we can join those to get the full filepath.
480
+ if os.path.isfile(path):
481
+ filepath = path
482
+ else:
483
+ filepath = os.path.normpath(os.path.join(path, filename))
484
+
485
+ optstr = ''
486
+ if options:
487
+ opt_values = ", ".join(f'{k}="{v}"' for k, v in options.items())
488
+ optstr = f" ({opt_values})"
489
+ self.log.info('extracting messages from %s%s', filepath, optstr)
490
+
491
+ return callback
492
+
493
+ def run(self):
494
+ mappings = self._get_mappings()
495
+ with open(self.output_file, 'wb') as outfile:
496
+ catalog = Catalog(project=self.project,
497
+ version=self.version,
498
+ msgid_bugs_address=self.msgid_bugs_address,
499
+ copyright_holder=self.copyright_holder,
500
+ charset=self.charset,
501
+ header_comment=(self.header_comment or DEFAULT_HEADER),
502
+ last_translator=self.last_translator)
503
+
504
+ for path, method_map, options_map in mappings:
505
+ callback = self._build_callback(path)
506
+ if os.path.isfile(path):
507
+ current_dir = os.getcwd()
508
+ extracted = check_and_call_extract_file(
509
+ path, method_map, options_map,
510
+ callback, self.keywords, self.add_comments,
511
+ self.strip_comments, current_dir,
512
+ )
513
+ else:
514
+ extracted = extract_from_dir(
515
+ path, method_map, options_map,
516
+ keywords=self.keywords,
517
+ comment_tags=self.add_comments,
518
+ callback=callback,
519
+ strip_comment_tags=self.strip_comments,
520
+ directory_filter=self.directory_filter,
521
+ )
522
+ for filename, lineno, message, comments, context in extracted:
523
+ if os.path.isfile(path):
524
+ filepath = filename # already normalized
525
+ else:
526
+ filepath = os.path.normpath(os.path.join(path, filename))
527
+
528
+ catalog.add(message, None, [(filepath, lineno)],
529
+ auto_comments=comments, context=context)
530
+
531
+ self.log.info('writing PO template file to %s', self.output_file)
532
+ write_po(outfile, catalog, width=self.width,
533
+ no_location=self.no_location,
534
+ omit_header=self.omit_header,
535
+ sort_output=self.sort_output,
536
+ sort_by_file=self.sort_by_file,
537
+ include_lineno=self.include_lineno)
538
+
539
+ def _get_mappings(self):
540
+ mappings = []
541
+
542
+ if self.mapping_file:
543
+ if self.mapping_file.endswith(".toml"):
544
+ with open(self.mapping_file, "rb") as fileobj:
545
+ file_style = (
546
+ "pyproject.toml"
547
+ if os.path.basename(self.mapping_file) == "pyproject.toml"
548
+ else "standalone"
549
+ )
550
+ method_map, options_map = _parse_mapping_toml(
551
+ fileobj,
552
+ filename=self.mapping_file,
553
+ style=file_style,
554
+ )
555
+ else:
556
+ with open(self.mapping_file) as fileobj:
557
+ method_map, options_map = parse_mapping_cfg(fileobj, filename=self.mapping_file)
558
+ for path in self.input_paths:
559
+ mappings.append((path, method_map, options_map))
560
+
561
+ elif getattr(self.distribution, 'message_extractors', None):
562
+ message_extractors = self.distribution.message_extractors
563
+ for path, mapping in message_extractors.items():
564
+ if isinstance(mapping, str):
565
+ method_map, options_map = parse_mapping_cfg(StringIO(mapping))
566
+ else:
567
+ method_map, options_map = [], {}
568
+ for pattern, method, options in mapping:
569
+ method_map.append((pattern, method))
570
+ options_map[pattern] = options or {}
571
+ mappings.append((path, method_map, options_map))
572
+
573
+ else:
574
+ for path in self.input_paths:
575
+ mappings.append((path, DEFAULT_MAPPING, {}))
576
+
577
+ return mappings
578
+
579
+
580
+ class InitCatalog(CommandMixin):
581
+ description = 'create a new catalog based on a POT file'
582
+ user_options = [
583
+ ('domain=', 'D',
584
+ "domain of PO file (default 'messages')"),
585
+ ('input-file=', 'i',
586
+ 'name of the input file'),
587
+ ('output-dir=', 'd',
588
+ 'path to output directory'),
589
+ ('output-file=', 'o',
590
+ "name of the output file (default "
591
+ "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
592
+ ('locale=', 'l',
593
+ 'locale for the new localized catalog'),
594
+ ('width=', 'w',
595
+ 'set output line width (default 76)'),
596
+ ('no-wrap', None,
597
+ 'do not break long message lines, longer than the output line width, '
598
+ 'into several lines'),
599
+ ]
600
+ boolean_options = ['no-wrap']
601
+
602
+ def initialize_options(self):
603
+ self.output_dir = None
604
+ self.output_file = None
605
+ self.input_file = None
606
+ self.locale = None
607
+ self.domain = 'messages'
608
+ self.no_wrap = False
609
+ self.width = None
610
+
611
+ def finalize_options(self):
612
+ if not self.input_file:
613
+ raise OptionError('you must specify the input file')
614
+
615
+ if not self.locale:
616
+ raise OptionError('you must provide a locale for the new catalog')
617
+ try:
618
+ self._locale = Locale.parse(self.locale)
619
+ except UnknownLocaleError as e:
620
+ raise OptionError(e) from e
621
+
622
+ if not self.output_file and not self.output_dir:
623
+ raise OptionError('you must specify the output directory')
624
+ if not self.output_file:
625
+ self.output_file = os.path.join(self.output_dir, self.locale,
626
+ 'LC_MESSAGES', f"{self.domain}.po")
627
+
628
+ if not os.path.exists(os.path.dirname(self.output_file)):
629
+ os.makedirs(os.path.dirname(self.output_file))
630
+ if self.no_wrap and self.width:
631
+ raise OptionError("'--no-wrap' and '--width' are mutually exclusive")
632
+ if not self.no_wrap and not self.width:
633
+ self.width = 76
634
+ elif self.width is not None:
635
+ self.width = int(self.width)
636
+
637
+ def run(self):
638
+ self.log.info(
639
+ 'creating catalog %s based on %s', self.output_file, self.input_file,
640
+ )
641
+
642
+ with open(self.input_file, 'rb') as infile:
643
+ # Although reading from the catalog template, read_po must be fed
644
+ # the locale in order to correctly calculate plurals
645
+ catalog = read_po(infile, locale=self.locale)
646
+
647
+ catalog.locale = self._locale
648
+ catalog.revision_date = datetime.datetime.now(LOCALTZ)
649
+ catalog.fuzzy = False
650
+
651
+ with open(self.output_file, 'wb') as outfile:
652
+ write_po(outfile, catalog, width=self.width)
653
+
654
+
655
+ class UpdateCatalog(CommandMixin):
656
+ description = 'update message catalogs from a POT file'
657
+ user_options = [
658
+ ('domain=', 'D',
659
+ "domain of PO file (default 'messages')"),
660
+ ('input-file=', 'i',
661
+ 'name of the input file'),
662
+ ('output-dir=', 'd',
663
+ 'path to base directory containing the catalogs'),
664
+ ('output-file=', 'o',
665
+ "name of the output file (default "
666
+ "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
667
+ ('omit-header', None,
668
+ 'do not include msgid "" entry in header'),
669
+ ('locale=', 'l',
670
+ 'locale of the catalog to compile'),
671
+ ('width=', 'w',
672
+ 'set output line width (default 76)'),
673
+ ('no-wrap', None,
674
+ 'do not break long message lines, longer than the output line width, '
675
+ 'into several lines'),
676
+ ('ignore-obsolete=', None,
677
+ 'whether to omit obsolete messages from the output'),
678
+ ('init-missing=', None,
679
+ 'if any output files are missing, initialize them first'),
680
+ ('no-fuzzy-matching', 'N',
681
+ 'do not use fuzzy matching'),
682
+ ('update-header-comment', None,
683
+ 'update target header comment'),
684
+ ('previous', None,
685
+ 'keep previous msgids of translated messages'),
686
+ ('check=', None,
687
+ 'don\'t update the catalog, just return the status. Return code 0 '
688
+ 'means nothing would change. Return code 1 means that the catalog '
689
+ 'would be updated'),
690
+ ('ignore-pot-creation-date=', None,
691
+ 'ignore changes to POT-Creation-Date when updating or checking'),
692
+ ]
693
+ boolean_options = [
694
+ 'omit-header', 'no-wrap', 'ignore-obsolete', 'init-missing',
695
+ 'no-fuzzy-matching', 'previous', 'update-header-comment',
696
+ 'check', 'ignore-pot-creation-date',
697
+ ]
698
+
699
+ def initialize_options(self):
700
+ self.domain = 'messages'
701
+ self.input_file = None
702
+ self.output_dir = None
703
+ self.output_file = None
704
+ self.omit_header = False
705
+ self.locale = None
706
+ self.width = None
707
+ self.no_wrap = False
708
+ self.ignore_obsolete = False
709
+ self.init_missing = False
710
+ self.no_fuzzy_matching = False
711
+ self.update_header_comment = False
712
+ self.previous = False
713
+ self.check = False
714
+ self.ignore_pot_creation_date = False
715
+
716
+ def finalize_options(self):
717
+ if not self.input_file:
718
+ raise OptionError('you must specify the input file')
719
+ if not self.output_file and not self.output_dir:
720
+ raise OptionError('you must specify the output file or directory')
721
+ if self.output_file and not self.locale:
722
+ raise OptionError('you must specify the locale')
723
+
724
+ if self.init_missing:
725
+ if not self.locale:
726
+ raise OptionError(
727
+ 'you must specify the locale for '
728
+ 'the init-missing option to work',
729
+ )
730
+
731
+ try:
732
+ self._locale = Locale.parse(self.locale)
733
+ except UnknownLocaleError as e:
734
+ raise OptionError(e) from e
735
+ else:
736
+ self._locale = None
737
+
738
+ if self.no_wrap and self.width:
739
+ raise OptionError("'--no-wrap' and '--width' are mutually exclusive")
740
+ if not self.no_wrap and not self.width:
741
+ self.width = 76
742
+ elif self.width is not None:
743
+ self.width = int(self.width)
744
+ if self.no_fuzzy_matching and self.previous:
745
+ self.previous = False
746
+
747
+ def run(self):
748
+ check_status = {}
749
+ po_files = []
750
+ if not self.output_file:
751
+ if self.locale:
752
+ po_files.append((self.locale,
753
+ os.path.join(self.output_dir, self.locale,
754
+ 'LC_MESSAGES',
755
+ f"{self.domain}.po")))
756
+ else:
757
+ for locale in os.listdir(self.output_dir):
758
+ po_file = os.path.join(self.output_dir, locale,
759
+ 'LC_MESSAGES',
760
+ f"{self.domain}.po")
761
+ if os.path.exists(po_file):
762
+ po_files.append((locale, po_file))
763
+ else:
764
+ po_files.append((self.locale, self.output_file))
765
+
766
+ if not po_files:
767
+ raise OptionError('no message catalogs found')
768
+
769
+ domain = self.domain
770
+ if not domain:
771
+ domain = os.path.splitext(os.path.basename(self.input_file))[0]
772
+
773
+ with open(self.input_file, 'rb') as infile:
774
+ template = read_po(infile)
775
+
776
+ for locale, filename in po_files:
777
+ if self.init_missing and not os.path.exists(filename):
778
+ if self.check:
779
+ check_status[filename] = False
780
+ continue
781
+ self.log.info(
782
+ 'creating catalog %s based on %s', filename, self.input_file,
783
+ )
784
+
785
+ with open(self.input_file, 'rb') as infile:
786
+ # Although reading from the catalog template, read_po must
787
+ # be fed the locale in order to correctly calculate plurals
788
+ catalog = read_po(infile, locale=self.locale)
789
+
790
+ catalog.locale = self._locale
791
+ catalog.revision_date = datetime.datetime.now(LOCALTZ)
792
+ catalog.fuzzy = False
793
+
794
+ with open(filename, 'wb') as outfile:
795
+ write_po(outfile, catalog)
796
+
797
+ self.log.info('updating catalog %s based on %s', filename, self.input_file)
798
+ with open(filename, 'rb') as infile:
799
+ catalog = read_po(infile, locale=locale, domain=domain)
800
+
801
+ catalog.update(
802
+ template, self.no_fuzzy_matching,
803
+ update_header_comment=self.update_header_comment,
804
+ update_creation_date=not self.ignore_pot_creation_date,
805
+ )
806
+
807
+ tmpname = os.path.join(os.path.dirname(filename),
808
+ tempfile.gettempprefix() +
809
+ os.path.basename(filename))
810
+ try:
811
+ with open(tmpname, 'wb') as tmpfile:
812
+ write_po(tmpfile, catalog,
813
+ omit_header=self.omit_header,
814
+ ignore_obsolete=self.ignore_obsolete,
815
+ include_previous=self.previous, width=self.width)
816
+ except Exception:
817
+ os.remove(tmpname)
818
+ raise
819
+
820
+ if self.check:
821
+ with open(filename, "rb") as origfile:
822
+ original_catalog = read_po(origfile)
823
+ with open(tmpname, "rb") as newfile:
824
+ updated_catalog = read_po(newfile)
825
+ updated_catalog.revision_date = original_catalog.revision_date
826
+ check_status[filename] = updated_catalog.is_identical(original_catalog)
827
+ os.remove(tmpname)
828
+ continue
829
+
830
+ try:
831
+ os.rename(tmpname, filename)
832
+ except OSError:
833
+ # We're probably on Windows, which doesn't support atomic
834
+ # renames, at least not through Python
835
+ # If the error is in fact due to a permissions problem, that
836
+ # same error is going to be raised from one of the following
837
+ # operations
838
+ os.remove(filename)
839
+ shutil.copy(tmpname, filename)
840
+ os.remove(tmpname)
841
+
842
+ if self.check:
843
+ for filename, up_to_date in check_status.items():
844
+ if up_to_date:
845
+ self.log.info('Catalog %s is up to date.', filename)
846
+ else:
847
+ self.log.warning('Catalog %s is out of date.', filename)
848
+ if not all(check_status.values()):
849
+ raise BaseError("Some catalogs are out of date.")
850
+ else:
851
+ self.log.info("All the catalogs are up-to-date.")
852
+ return
853
+
854
+
855
+ class CommandLineInterface:
856
+ """Command-line interface.
857
+
858
+ This class provides a simple command-line interface to the message
859
+ extraction and PO file generation functionality.
860
+ """
861
+
862
+ usage = '%%prog %s [options] %s'
863
+ version = f'%prog {VERSION}'
864
+ commands = {
865
+ 'compile': 'compile message catalogs to MO files',
866
+ 'extract': 'extract messages from source files and generate a POT file',
867
+ 'init': 'create new message catalogs from a POT file',
868
+ 'update': 'update existing message catalogs from a POT file',
869
+ }
870
+
871
+ command_classes = {
872
+ 'compile': CompileCatalog,
873
+ 'extract': ExtractMessages,
874
+ 'init': InitCatalog,
875
+ 'update': UpdateCatalog,
876
+ }
877
+
878
+ log = None # Replaced on instance level
879
+
880
+ def run(self, argv=None):
881
+ """Main entry point of the command-line interface.
882
+
883
+ :param argv: list of arguments passed on the command-line
884
+ """
885
+
886
+ if argv is None:
887
+ argv = sys.argv
888
+
889
+ self.parser = optparse.OptionParser(usage=self.usage % ('command', '[args]'),
890
+ version=self.version)
891
+ self.parser.disable_interspersed_args()
892
+ self.parser.print_help = self._help
893
+ self.parser.add_option('--list-locales', dest='list_locales',
894
+ action='store_true',
895
+ help="print all known locales and exit")
896
+ self.parser.add_option('-v', '--verbose', action='store_const',
897
+ dest='loglevel', const=logging.DEBUG,
898
+ help='print as much as possible')
899
+ self.parser.add_option('-q', '--quiet', action='store_const',
900
+ dest='loglevel', const=logging.ERROR,
901
+ help='print as little as possible')
902
+ self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
903
+
904
+ options, args = self.parser.parse_args(argv[1:])
905
+
906
+ self._configure_logging(options.loglevel)
907
+ if options.list_locales:
908
+ identifiers = localedata.locale_identifiers()
909
+ id_width = max(len(identifier) for identifier in identifiers) + 1
910
+ for identifier in sorted(identifiers):
911
+ locale = Locale.parse(identifier)
912
+ print(f"{identifier:<{id_width}} {locale.english_name}")
913
+ return 0
914
+
915
+ if not args:
916
+ self.parser.error('no valid command or option passed. '
917
+ 'Try the -h/--help option for more information.')
918
+
919
+ cmdname = args[0]
920
+ if cmdname not in self.commands:
921
+ self.parser.error(f'unknown command "{cmdname}"')
922
+
923
+ cmdinst = self._configure_command(cmdname, args[1:])
924
+ return cmdinst.run()
925
+
926
+ def _configure_logging(self, loglevel):
927
+ self.log = log
928
+ self.log.setLevel(loglevel)
929
+ # Don't add a new handler for every instance initialization (#227), as that
930
+ # would cause duplicated output when the CommandLineInterface is used as a
931
+ # normal Python class.
932
+ if self.log.handlers:
933
+ handler = self.log.handlers[0]
934
+ else:
935
+ handler = logging.StreamHandler()
936
+ self.log.addHandler(handler)
937
+ handler.setLevel(loglevel)
938
+ formatter = logging.Formatter('%(message)s')
939
+ handler.setFormatter(formatter)
940
+
941
+ def _help(self):
942
+ print(self.parser.format_help())
943
+ print("commands:")
944
+ cmd_width = max(8, max(len(command) for command in self.commands) + 1)
945
+ for name, description in sorted(self.commands.items()):
946
+ print(f" {name:<{cmd_width}} {description}")
947
+
948
+ def _configure_command(self, cmdname, argv):
949
+ """
950
+ :type cmdname: str
951
+ :type argv: list[str]
952
+ """
953
+ cmdclass = self.command_classes[cmdname]
954
+ cmdinst = cmdclass()
955
+ if self.log:
956
+ cmdinst.log = self.log # Use our logger, not distutils'.
957
+ assert isinstance(cmdinst, CommandMixin)
958
+ cmdinst.initialize_options()
959
+
960
+ parser = optparse.OptionParser(
961
+ usage=self.usage % (cmdname, ''),
962
+ description=self.commands[cmdname],
963
+ )
964
+ as_args: str | None = getattr(cmdclass, "as_args", None)
965
+ for long, short, help in cmdclass.user_options:
966
+ name = long.strip("=")
967
+ default = getattr(cmdinst, name.replace("-", "_"))
968
+ strs = [f"--{name}"]
969
+ if short:
970
+ strs.append(f"-{short}")
971
+ strs.extend(cmdclass.option_aliases.get(name, ()))
972
+ choices = cmdclass.option_choices.get(name, None)
973
+ if name == as_args:
974
+ parser.usage += f"<{name}>"
975
+ elif name in cmdclass.boolean_options:
976
+ parser.add_option(*strs, action="store_true", help=help)
977
+ elif name in cmdclass.multiple_value_options:
978
+ parser.add_option(*strs, action="append", help=help, choices=choices)
979
+ else:
980
+ parser.add_option(*strs, help=help, default=default, choices=choices)
981
+ options, args = parser.parse_args(argv)
982
+
983
+ if as_args:
984
+ setattr(options, as_args.replace('-', '_'), args)
985
+
986
+ for key, value in vars(options).items():
987
+ setattr(cmdinst, key, value)
988
+
989
+ try:
990
+ cmdinst.ensure_finalized()
991
+ except OptionError as err:
992
+ parser.error(str(err))
993
+
994
+ return cmdinst
995
+
996
+
997
+ def main():
998
+ return CommandLineInterface().run(sys.argv)
999
+
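As a hedged usage sketch, the CLI above can also be driven programmatically; the directory name 'myproject' and the output file name below are placeholders (the directory must exist for finalize_options to accept it):

from babel.messages.frontend import CommandLineInterface

# argv[0] is treated as the program name and skipped by run(); the remaining
# arguments are parsed exactly as the ``pybabel`` console script would parse them.
CommandLineInterface().run([
    'pybabel', 'extract', '--output-file', 'messages.pot', 'myproject',
])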
1000
+
1001
+ def parse_mapping(fileobj, filename=None):
1002
+ warnings.warn(
1003
+ "parse_mapping is deprecated, use parse_mapping_cfg instead",
1004
+ DeprecationWarning,
1005
+ stacklevel=2,
1006
+ )
1007
+ return parse_mapping_cfg(fileobj, filename)
1008
+
1009
+
1010
+ def parse_mapping_cfg(fileobj, filename=None):
1011
+ """Parse an extraction method mapping from a file-like object.
1012
+
1013
+ :param fileobj: a readable file-like object containing the configuration
1014
+ text to parse
1015
+ :param filename: the name of the file being parsed, for error messages
1016
+ """
1017
+ extractors = {}
1018
+ method_map = []
1019
+ options_map = {}
1020
+
1021
+ parser = RawConfigParser()
1022
+ parser.read_file(fileobj, filename)
1023
+
1024
+ for section in parser.sections():
1025
+ if section == 'extractors':
1026
+ extractors = dict(parser.items(section))
1027
+ else:
1028
+ method, pattern = (part.strip() for part in section.split(':', 1))
1029
+ method_map.append((pattern, method))
1030
+ options_map[pattern] = dict(parser.items(section))
1031
+
1032
+ if extractors:
1033
+ for idx, (pattern, method) in enumerate(method_map):
1034
+ if method in extractors:
1035
+ method = extractors[method]
1036
+ method_map[idx] = (pattern, method)
1037
+
1038
+ return method_map, options_map
1039
+
1040
+
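For reference, a small sketch of the cfg-style mapping that parse_mapping_cfg() consumes; the extractor path mypackage.extract:extract_custom and the glob patterns are illustrative placeholders only:

from io import StringIO

from babel.messages.frontend import parse_mapping_cfg

mapping = StringIO("""\
[extractors]
custom = mypackage.extract:extract_custom

[python: **.py]

[custom: **/templates/**.tmpl]
encoding = utf-8
""")

method_map, options_map = parse_mapping_cfg(mapping)
# method_map == [('**.py', 'python'),
#                ('**/templates/**.tmpl', 'mypackage.extract:extract_custom')]
# options_map['**/templates/**.tmpl'] == {'encoding': 'utf-8'}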
1041
+ def _parse_config_object(config: dict, *, filename="(unknown)"):
1042
+ extractors = {}
1043
+ method_map = []
1044
+ options_map = {}
1045
+
1046
+ extractors_read = config.get("extractors", {})
1047
+ if not isinstance(extractors_read, dict):
1048
+ raise ConfigurationError(f"{filename}: extractors: Expected a dictionary, got {type(extractors_read)!r}")
1049
+ for method, callable_spec in extractors_read.items():
1050
+ if not isinstance(method, str):
1051
+ # Impossible via TOML, but could happen with a custom object.
1052
+ raise ConfigurationError(f"{filename}: extractors: Extraction method must be a string, got {method!r}")
1053
+ if not isinstance(callable_spec, str):
1054
+ raise ConfigurationError(f"{filename}: extractors: Callable specification must be a string, got {callable_spec!r}")
1055
+ extractors[method] = callable_spec
1056
+
1057
+ if "mapping" in config:
1058
+ raise ConfigurationError(f"{filename}: 'mapping' is not a valid key, did you mean 'mappings'?")
1059
+
1060
+ mappings_read = config.get("mappings", [])
1061
+ if not isinstance(mappings_read, list):
1062
+ raise ConfigurationError(f"{filename}: mappings: Expected a list, got {type(mappings_read)!r}")
1063
+ for idx, entry in enumerate(mappings_read):
1064
+ if not isinstance(entry, dict):
1065
+ raise ConfigurationError(f"{filename}: mappings[{idx}]: Expected a dictionary, got {type(entry)!r}")
1066
+ entry = entry.copy()
1067
+
1068
+ method = entry.pop("method", None)
1069
+ if not isinstance(method, str):
1070
+ raise ConfigurationError(f"{filename}: mappings[{idx}]: 'method' must be a string, got {method!r}")
1071
+ method = extractors.get(method, method) # Map the extractor name to the callable now
1072
+
1073
+ pattern = entry.pop("pattern", None)
1074
+ if not isinstance(pattern, (list, str)):
1075
+ raise ConfigurationError(f"{filename}: mappings[{idx}]: 'pattern' must be a list or a string, got {pattern!r}")
1076
+ if not isinstance(pattern, list):
1077
+ pattern = [pattern]
1078
+
1079
+ for pat in pattern:
1080
+ if not isinstance(pat, str):
1081
+ raise ConfigurationError(f"{filename}: mappings[{idx}]: 'pattern' elements must be strings, got {pat!r}")
1082
+ method_map.append((pat, method))
1083
+ options_map[pat] = entry
1084
+
1085
+ return method_map, options_map
1086
+
1087
+
1088
+ def _parse_mapping_toml(
1089
+ fileobj: BinaryIO,
1090
+ filename: str = "(unknown)",
1091
+ style: Literal["standalone", "pyproject.toml"] = "standalone",
1092
+ ):
1093
+ """Parse an extraction method mapping from a binary file-like object.
1094
+
1095
+ .. warning: As of this version of Babel, this is a private API subject to changes.
1096
+
1097
+ :param fileobj: a readable binary file-like object containing the configuration TOML to parse
1098
+ :param filename: the name of the file being parsed, for error messages
1099
+ :param style: whether the file is in the style of a `pyproject.toml` file, i.e. whether to look for `tool.babel`.
1100
+ """
1101
+ try:
1102
+ import tomllib
1103
+ except ImportError:
1104
+ try:
1105
+ import tomli as tomllib
1106
+ except ImportError as ie: # pragma: no cover
1107
+ raise ImportError("tomli or tomllib is required to parse TOML files") from ie
1108
+
1109
+ try:
1110
+ parsed_data = tomllib.load(fileobj)
1111
+ except tomllib.TOMLDecodeError as e:
1112
+ raise ConfigurationError(f"{filename}: Error parsing TOML file: {e}") from e
1113
+
1114
+ if style == "pyproject.toml":
1115
+ try:
1116
+ babel_data = parsed_data["tool"]["babel"]
1117
+ except (TypeError, KeyError) as e:
1118
+ raise ConfigurationError(f"{filename}: No 'tool.babel' section found in file") from e
1119
+ elif style == "standalone":
1120
+ babel_data = parsed_data
1121
+ if "babel" in babel_data:
1122
+ raise ConfigurationError(f"{filename}: 'babel' should not be present in a stand-alone configuration file")
1123
+ else: # pragma: no cover
1124
+ raise ValueError(f"Unknown TOML style {style!r}")
1125
+
1126
+ return _parse_config_object(babel_data, filename=filename)
1127
+
1128
+
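The same mapping can be expressed in TOML and fed to the private _parse_mapping_toml() helper above (this requires tomllib, available from Python 3.11, or the tomli backport); the extractor path and patterns are again placeholders:

from io import BytesIO

from babel.messages.frontend import _parse_mapping_toml

toml_mapping = BytesIO(b"""\
[extractors]
custom = "mypackage.extract:extract_custom"

[[mappings]]
method = "python"
pattern = "**.py"

[[mappings]]
method = "custom"
pattern = "**/templates/**.tmpl"
encoding = "utf-8"
""")

method_map, options_map = _parse_mapping_toml(toml_mapping, style="standalone")
# The "custom" method is resolved through [extractors], and the extra
# "encoding" key ends up in options_map["**/templates/**.tmpl"].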
1129
+ def _parse_spec(s: str) -> tuple[int | None, tuple[int | tuple[int, str], ...]]:
1130
+ inds = []
1131
+ number = None
1132
+ for x in s.split(','):
1133
+ if x[-1] == 't':
1134
+ number = int(x[:-1])
1135
+ elif x[-1] == 'c':
1136
+ inds.append((int(x[:-1]), 'c'))
1137
+ else:
1138
+ inds.append(int(x))
1139
+ return number, tuple(inds)
1140
+
1141
+
1142
+ def parse_keywords(strings: Iterable[str] = ()):
1143
+ """Parse keywords specifications from the given list of strings.
1144
+
1145
+ >>> import pprint
1146
+ >>> keywords = ['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2',
1147
+ ... 'polymorphic:1', 'polymorphic:2,2t', 'polymorphic:3c,3t']
1148
+ >>> pprint.pprint(parse_keywords(keywords))
1149
+ {'_': None,
1150
+ 'dgettext': (2,),
1151
+ 'dngettext': (2, 3),
1152
+ 'pgettext': ((1, 'c'), 2),
1153
+ 'polymorphic': {None: (1,), 2: (2,), 3: ((3, 'c'),)}}
1154
+
1155
+ The input keywords are in GNU Gettext style; see :doc:`cmdline` for details.
1156
+
1157
+ The output is a dictionary mapping keyword names to a dictionary of specifications.
1158
+ Keys in this dictionary are numbers of arguments, where ``None`` means that all numbers
1159
+ of arguments are matched, and a number means only calls with that number of arguments
1160
+ are matched (which happens when using the "t" specifier). However, as a special
1161
+ case for backwards compatibility, if the dictionary of specifications would
1162
+ be ``{None: x}``, i.e., there is only one specification and it matches all argument
1163
+ counts, then it is collapsed into just ``x``.
1164
+
1165
+ A specification is either a tuple or None. If a tuple, each element can be either a number
1166
+ ``n``, meaning that the nth argument should be extracted as a message, or the tuple
1167
+ ``(n, 'c')``, meaning that the nth argument should be extracted as context for the
1168
+ messages. A ``None`` specification is equivalent to ``(1,)``, extracting the first
1169
+ argument.
1170
+ """
1171
+ keywords = {}
1172
+ for string in strings:
1173
+ if ':' in string:
1174
+ funcname, spec_str = string.split(':')
1175
+ number, spec = _parse_spec(spec_str)
1176
+ else:
1177
+ funcname = string
1178
+ number = None
1179
+ spec = None
1180
+ keywords.setdefault(funcname, {})[number] = spec
1181
+
1182
+ # For best backwards compatibility, collapse {None: x} into x.
1183
+ for k, v in keywords.items():
1184
+ if set(v) == {None}:
1185
+ keywords[k] = v[None]
1186
+
1187
+ return keywords
1188
+
1189
+
1190
+ def __getattr__(name: str):
1191
+ # Re-exports for backwards compatibility;
1192
+ # `setuptools_frontend` is the canonical import location.
1193
+ if name in {'check_message_extractors', 'compile_catalog', 'extract_messages', 'init_catalog', 'update_catalog'}:
1194
+ from babel.messages import setuptools_frontend
1195
+
1196
+ return getattr(setuptools_frontend, name)
1197
+
1198
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
1199
+
1200
+
1201
+ if __name__ == '__main__':
1202
+ main()
lib/python3.10/site-packages/babel/messages/jslexer.py ADDED
@@ -0,0 +1,204 @@
1
+ """
2
+ babel.messages.jslexer
3
+ ~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ A simple JavaScript 1.5 lexer which is used for the JavaScript
6
+ extractor.
7
+
8
+ :copyright: (c) 2013-2025 by the Babel Team.
9
+ :license: BSD, see LICENSE for more details.
10
+ """
11
+ from __future__ import annotations
12
+
13
+ import re
14
+ from collections.abc import Generator
15
+ from typing import NamedTuple
16
+
17
+ operators: list[str] = sorted([
18
+ '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
19
+ '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
20
+ '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
21
+ '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':',
22
+ ], key=len, reverse=True)
23
+
24
+ escapes: dict[str, str] = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}
25
+
26
+ name_re = re.compile(r'[\w$_][\w\d$_]*', re.UNICODE)
27
+ dotted_name_re = re.compile(r'[\w$_][\w\d$_.]*[\w\d$_.]', re.UNICODE)
28
+ division_re = re.compile(r'/=?')
29
+ regex_re = re.compile(r'/(?:[^/\\]*(?:\\.[^/\\]*)*)/[a-zA-Z]*', re.DOTALL)
30
+ line_re = re.compile(r'(\r\n|\n|\r)')
31
+ line_join_re = re.compile(r'\\' + line_re.pattern)
32
+ uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
33
+ hex_escape_re = re.compile(r'[a-fA-F0-9]{1,2}')
34
+
35
+
36
+ class Token(NamedTuple):
37
+ type: str
38
+ value: str
39
+ lineno: int
40
+
41
+
42
+ _rules: list[tuple[str | None, re.Pattern[str]]] = [
43
+ (None, re.compile(r'\s+', re.UNICODE)),
44
+ (None, re.compile(r'<!--.*')),
45
+ ('linecomment', re.compile(r'//.*')),
46
+ ('multilinecomment', re.compile(r'/\*.*?\*/', re.UNICODE | re.DOTALL)),
47
+ ('dotted_name', dotted_name_re),
48
+ ('name', name_re),
49
+ ('number', re.compile(r'''(
50
+ (?:0|[1-9]\d*)
51
+ (\.\d+)?
52
+ ([eE][-+]?\d+)? |
53
+ (0x[a-fA-F0-9]+)
54
+ )''', re.VERBOSE)),
55
+ ('jsx_tag', re.compile(r'(?:</?[^>\s]+|/>)', re.I)), # May be mangled in `get_rules`
56
+ ('operator', re.compile(r'(%s)' % '|'.join(map(re.escape, operators)))),
57
+ ('template_string', re.compile(r'''`(?:[^`\\]*(?:\\.[^`\\]*)*)`''', re.UNICODE)),
58
+ ('string', re.compile(r'''(
59
+ '(?:[^'\\]*(?:\\.[^'\\]*)*)' |
60
+ "(?:[^"\\]*(?:\\.[^"\\]*)*)"
61
+ )''', re.VERBOSE | re.DOTALL)),
62
+ ]
63
+
64
+
65
+ def get_rules(jsx: bool, dotted: bool, template_string: bool) -> list[tuple[str | None, re.Pattern[str]]]:
66
+ """
67
+ Get a tokenization rule list given the passed syntax options.
68
+
69
+ Internal to this module.
70
+ """
71
+ rules = []
72
+ for token_type, rule in _rules:
73
+ if not jsx and token_type and 'jsx' in token_type:
74
+ continue
75
+ if not template_string and token_type == 'template_string':
76
+ continue
77
+ if token_type == 'dotted_name':
78
+ if not dotted:
79
+ continue
80
+ token_type = 'name'
81
+ rules.append((token_type, rule))
82
+ return rules
83
+
84
+
85
+ def indicates_division(token: Token) -> bool:
86
+ """A helper function that helps the tokenizer to decide if the current
87
+ token may be followed by a division operator.
88
+ """
89
+ if token.type == 'operator':
90
+ return token.value in (')', ']', '}', '++', '--')
91
+ return token.type in ('name', 'number', 'string', 'regexp')
92
+
93
+
94
+ def unquote_string(string: str) -> str:
95
+ """Unquote a string with JavaScript rules. The string has to start with
96
+ string delimiters (``'``, ``"`` or the back-tick/grave accent (for template strings).)
97
+ """
98
+ assert string and string[0] == string[-1] and string[0] in '"\'`', \
99
+ 'string provided is not properly delimited'
100
+ string = line_join_re.sub('\\1', string[1:-1])
101
+ result: list[str] = []
102
+ add = result.append
103
+ pos = 0
104
+
105
+ while True:
106
+ # scan for the next escape
107
+ escape_pos = string.find('\\', pos)
108
+ if escape_pos < 0:
109
+ break
110
+ add(string[pos:escape_pos])
111
+
112
+ # check which character is escaped
113
+ next_char = string[escape_pos + 1]
114
+ if next_char in escapes:
115
+ add(escapes[next_char])
116
+
117
+ # unicode escapes. trie to consume up to four characters of
118
+ # hexadecimal characters and try to interpret them as unicode
119
+ # character point. If there is no such character point, put
120
+ # all the consumed characters into the string.
121
+ elif next_char in 'uU':
122
+ escaped = uni_escape_re.match(string, escape_pos + 2)
123
+ if escaped is not None:
124
+ escaped_value = escaped.group()
125
+ if len(escaped_value) == 4:
126
+ try:
127
+ add(chr(int(escaped_value, 16)))
128
+ except ValueError:
129
+ pass
130
+ else:
131
+ pos = escape_pos + 6
132
+ continue
133
+ add(next_char + escaped_value)
134
+ pos = escaped.end()
135
+ continue
136
+ else:
137
+ add(next_char)
138
+
139
+ # hex escapes. conversion from 2-digit hex to char is infallible
140
+ elif next_char in 'xX':
141
+ escaped = hex_escape_re.match(string, escape_pos + 2)
142
+ if escaped is not None:
143
+ escaped_value = escaped.group()
144
+ add(chr(int(escaped_value, 16)))
145
+ pos = escape_pos + 2 + len(escaped_value)
146
+ continue
147
+ else:
148
+ add(next_char)
149
+
150
+ # bogus escape. Just remove the backslash.
151
+ else:
152
+ add(next_char)
153
+ pos = escape_pos + 2
154
+
155
+ if pos < len(string):
156
+ add(string[pos:])
157
+
158
+ return ''.join(result)
159
+
160
+
161
+ def tokenize(source: str, jsx: bool = True, dotted: bool = True, template_string: bool = True, lineno: int = 1) -> Generator[Token, None, None]:
162
+ """
163
+ Tokenize JavaScript/JSX source. Returns a generator of tokens.
164
+
165
+ :param source: The JavaScript source to tokenize.
166
+ :param jsx: Enable (limited) JSX parsing.
167
+ :param dotted: Read dotted names as single name token.
168
+ :param template_string: Support ES6 template strings
169
+ :param lineno: starting line number (optional)
170
+ """
171
+ may_divide = False
172
+ pos = 0
173
+ end = len(source)
174
+ rules = get_rules(jsx=jsx, dotted=dotted, template_string=template_string)
175
+
176
+ while pos < end:
177
+ # handle regular rules first
178
+ for token_type, rule in rules: # noqa: B007
179
+ match = rule.match(source, pos)
180
+ if match is not None:
181
+ break
182
+ # if we don't have a match we don't give up yet, but check for
183
+ # division operators or regular expression literals, based on
184
+ # the status of `may_divide` which is determined by the last
185
+ # processed non-whitespace token using `indicates_division`.
186
+ else:
187
+ if may_divide:
188
+ match = division_re.match(source, pos)
189
+ token_type = 'operator'
190
+ else:
191
+ match = regex_re.match(source, pos)
192
+ token_type = 'regexp'
193
+ if match is None:
194
+ # woops. invalid syntax. jump one char ahead and try again.
195
+ pos += 1
196
+ continue
197
+
198
+ token_value = match.group()
199
+ if token_type is not None:
200
+ token = Token(token_type, token_value, lineno)
201
+ may_divide = indicates_division(token)
202
+ yield token
203
+ lineno += len(line_re.findall(token_value))
204
+ pos = match.end()
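A brief usage sketch for this lexer: tokenize() yields Token(type, value, lineno) triples and unquote_string() decodes the string literals it finds; the JavaScript snippet is illustrative only:

from babel.messages.jslexer import tokenize, unquote_string

source = 'msg = gettext("Hello, \\"world\\"!");'
for token in tokenize(source):
    if token.type == 'string':
        # prints: 1 Hello, "world"!
        print(token.lineno, unquote_string(token.value))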
lib/python3.10/site-packages/babel/messages/mofile.py ADDED
@@ -0,0 +1,210 @@
1
+ """
2
+ babel.messages.mofile
3
+ ~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Writing of files in the ``gettext`` MO (machine object) format.
6
+
7
+ :copyright: (c) 2013-2025 by the Babel Team.
8
+ :license: BSD, see LICENSE for more details.
9
+ """
10
+ from __future__ import annotations
11
+
12
+ import array
13
+ import struct
14
+ from typing import TYPE_CHECKING
15
+
16
+ from babel.messages.catalog import Catalog, Message
17
+
18
+ if TYPE_CHECKING:
19
+ from _typeshed import SupportsRead, SupportsWrite
20
+
21
+ LE_MAGIC: int = 0x950412de
22
+ BE_MAGIC: int = 0xde120495
23
+
24
+
25
+ def read_mo(fileobj: SupportsRead[bytes]) -> Catalog:
26
+ """Read a binary MO file from the given file-like object and return a
27
+ corresponding `Catalog` object.
28
+
29
+ :param fileobj: the file-like object to read the MO file from
30
+
31
+ :note: The implementation of this function is heavily based on the
32
+ ``GNUTranslations._parse`` method of the ``gettext`` module in the
33
+ standard library.
34
+ """
35
+ catalog = Catalog()
36
+ headers = {}
37
+
38
+ filename = getattr(fileobj, 'name', '')
39
+
40
+ buf = fileobj.read()
41
+ buflen = len(buf)
42
+ unpack = struct.unpack
43
+
44
+ # Parse the .mo file header, which consists of 5 little endian 32
45
+ # bit words.
46
+ magic = unpack('<I', buf[:4])[0] # Are we big endian or little endian?
47
+ if magic == LE_MAGIC:
48
+ version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])
49
+ ii = '<II'
50
+ elif magic == BE_MAGIC:
51
+ version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])
52
+ ii = '>II'
53
+ else:
54
+ raise OSError(0, 'Bad magic number', filename)
55
+
56
+ # Now put all messages from the .mo file buffer into the catalog
57
+ # dictionary
58
+ for _i in range(msgcount):
59
+ mlen, moff = unpack(ii, buf[origidx:origidx + 8])
60
+ mend = moff + mlen
61
+ tlen, toff = unpack(ii, buf[transidx:transidx + 8])
62
+ tend = toff + tlen
63
+ if mend < buflen and tend < buflen:
64
+ msg = buf[moff:mend]
65
+ tmsg = buf[toff:tend]
66
+ else:
67
+ raise OSError(0, 'File is corrupt', filename)
68
+
69
+ # See if we're looking at GNU .mo conventions for metadata
70
+ if mlen == 0:
71
+ # Catalog description
72
+ lastkey = key = None
73
+ for item in tmsg.splitlines():
74
+ item = item.strip()
75
+ if not item:
76
+ continue
77
+ if b':' in item:
78
+ key, value = item.split(b':', 1)
79
+ lastkey = key = key.strip().lower()
80
+ headers[key] = value.strip()
81
+ elif lastkey:
82
+ headers[lastkey] += b'\n' + item
83
+
84
+ if b'\x04' in msg: # context
85
+ ctxt, msg = msg.split(b'\x04')
86
+ else:
87
+ ctxt = None
88
+
89
+ if b'\x00' in msg: # plural forms
90
+ msg = msg.split(b'\x00')
91
+ tmsg = tmsg.split(b'\x00')
92
+ msg = [x.decode(catalog.charset) for x in msg]
93
+ tmsg = [x.decode(catalog.charset) for x in tmsg]
94
+ else:
95
+ msg = msg.decode(catalog.charset)
96
+ tmsg = tmsg.decode(catalog.charset)
97
+ catalog[msg] = Message(msg, tmsg, context=ctxt)
98
+
99
+ # advance to next entry in the seek tables
100
+ origidx += 8
101
+ transidx += 8
102
+
103
+ catalog.mime_headers = headers.items()
104
+ return catalog
105
+
106
+
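A minimal round-trip sketch: write a catalog to an in-memory buffer with write_mo() (defined below) and read it back with read_mo(); the message and its translation are placeholders:

from io import BytesIO

from babel.messages.catalog import Catalog
from babel.messages.mofile import read_mo, write_mo

catalog = Catalog(locale='de')
catalog.add('foo', 'Foo-Uebersetzung')

buf = BytesIO()
write_mo(buf, catalog)
buf.seek(0)

restored = read_mo(buf)
print(restored['foo'].string)  # Foo-Uebersetzung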
107
+ def write_mo(fileobj: SupportsWrite[bytes], catalog: Catalog, use_fuzzy: bool = False) -> None:
108
+ """Write a catalog to the specified file-like object using the GNU MO file
109
+ format.
110
+
111
+ >>> import sys
112
+ >>> from babel.messages import Catalog
113
+ >>> from gettext import GNUTranslations
114
+ >>> from io import BytesIO
115
+
116
+ >>> catalog = Catalog(locale='en_US')
117
+ >>> catalog.add('foo', 'Voh')
118
+ <Message ...>
119
+ >>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))
120
+ <Message ...>
121
+ >>> catalog.add('fuz', 'Futz', flags=['fuzzy'])
122
+ <Message ...>
123
+ >>> catalog.add('Fizz', '')
124
+ <Message ...>
125
+ >>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
126
+ <Message ...>
127
+ >>> buf = BytesIO()
128
+
129
+ >>> write_mo(buf, catalog)
130
+ >>> x = buf.seek(0)
131
+ >>> translations = GNUTranslations(fp=buf)
132
+ >>> if sys.version_info[0] >= 3:
133
+ ... translations.ugettext = translations.gettext
134
+ ... translations.ungettext = translations.ngettext
135
+ >>> translations.ugettext('foo')
136
+ u'Voh'
137
+ >>> translations.ungettext('bar', 'baz', 1)
138
+ u'Bahr'
139
+ >>> translations.ungettext('bar', 'baz', 2)
140
+ u'Batz'
141
+ >>> translations.ugettext('fuz')
142
+ u'fuz'
143
+ >>> translations.ugettext('Fizz')
144
+ u'Fizz'
145
+ >>> translations.ugettext('Fuzz')
146
+ u'Fuzz'
147
+ >>> translations.ugettext('Fuzzes')
148
+ u'Fuzzes'
149
+
150
+ :param fileobj: the file-like object to write to
151
+ :param catalog: the `Catalog` instance
152
+ :param use_fuzzy: whether translations marked as "fuzzy" should be included
153
+ in the output
154
+ """
155
+ messages = list(catalog)
156
+ messages[1:] = [m for m in messages[1:]
157
+ if m.string and (use_fuzzy or not m.fuzzy)]
158
+ messages.sort()
159
+
160
+ ids = strs = b''
161
+ offsets = []
162
+
163
+ for message in messages:
164
+ # For each string, we need size and file offset. Each string is NUL
165
+ # terminated; the NUL does not count into the size.
166
+ if message.pluralizable:
167
+ msgid = b'\x00'.join([
168
+ msgid.encode(catalog.charset) for msgid in message.id
169
+ ])
170
+ msgstrs = []
171
+ for idx, string in enumerate(message.string):
172
+ if not string:
173
+ msgstrs.append(message.id[min(int(idx), 1)])
174
+ else:
175
+ msgstrs.append(string)
176
+ msgstr = b'\x00'.join([
177
+ msgstr.encode(catalog.charset) for msgstr in msgstrs
178
+ ])
179
+ else:
180
+ msgid = message.id.encode(catalog.charset)
181
+ msgstr = message.string.encode(catalog.charset)
182
+ if message.context:
183
+ msgid = b'\x04'.join([message.context.encode(catalog.charset),
184
+ msgid])
185
+ offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
186
+ ids += msgid + b'\x00'
187
+ strs += msgstr + b'\x00'
188
+
189
+ # The header is 7 32-bit unsigned integers. We don't use hash tables, so
190
+ # the keys start right after the index tables.
191
+ keystart = 7 * 4 + 16 * len(messages)
192
+ valuestart = keystart + len(ids)
193
+
194
+ # The string table first has the list of keys, then the list of values.
195
+ # Each entry has first the size of the string, then the file offset.
196
+ koffsets = []
197
+ voffsets = []
198
+ for o1, l1, o2, l2 in offsets:
199
+ koffsets += [l1, o1 + keystart]
200
+ voffsets += [l2, o2 + valuestart]
201
+ offsets = koffsets + voffsets
202
+
203
+ fileobj.write(struct.pack('Iiiiiii',
204
+ LE_MAGIC, # magic
205
+ 0, # version
206
+ len(messages), # number of entries
207
+ 7 * 4, # start of key index
208
+ 7 * 4 + len(messages) * 8, # start of value index
209
+ 0, 0, # size and offset of hash table
210
+ ) + array.array.tobytes(array.array("i", offsets)) + ids + strs)
lib/python3.10/site-packages/babel/messages/plurals.py ADDED
@@ -0,0 +1,266 @@
1
+ """
2
+ babel.messages.plurals
3
+ ~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Plural form definitions.
6
+
7
+ :copyright: (c) 2013-2025 by the Babel Team.
8
+ :license: BSD, see LICENSE for more details.
9
+ """
10
+ from __future__ import annotations
11
+
12
+ from babel.core import Locale, default_locale
13
+
14
+ # XXX: remove this file, duplication with babel.plural
15
+
16
+
17
+ LC_CTYPE: str | None = default_locale('LC_CTYPE')
18
+
19
+
20
+ PLURALS: dict[str, tuple[int, str]] = {
21
+ # Afar
22
+ # 'aa': (),
23
+ # Abkhazian
24
+ # 'ab': (),
25
+ # Avestan
26
+ # 'ae': (),
27
+ # Afrikaans - From Pootle's PO's
28
+ 'af': (2, '(n != 1)'),
29
+ # Akan
30
+ # 'ak': (),
31
+ # Amharic
32
+ # 'am': (),
33
+ # Aragonese
34
+ # 'an': (),
35
+ # Arabic - From Pootle's PO's
36
+ 'ar': (6, '(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 : n%100>=0 && n%100<=2 ? 4 : 5)'),
37
+ # Assamese
38
+ # 'as': (),
39
+ # Avaric
40
+ # 'av': (),
41
+ # Aymara
42
+ # 'ay': (),
43
+ # Azerbaijani
44
+ # 'az': (),
45
+ # Bashkir
46
+ # 'ba': (),
47
+ # Belarusian
48
+ 'be': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
49
+ # Bulgarian - From Pootle's PO's
50
+ 'bg': (2, '(n != 1)'),
51
+ # Bihari
52
+ # 'bh': (),
53
+ # Bislama
54
+ # 'bi': (),
55
+ # Bambara
56
+ # 'bm': (),
57
+ # Bengali - From Pootle's PO's
58
+ 'bn': (2, '(n != 1)'),
59
+ # Tibetan - as discussed in private with Andrew West
60
+ 'bo': (1, '0'),
61
+ # Breton
62
+ 'br': (
63
+ 6,
64
+ '(n==1 ? 0 : n%10==1 && n%100!=11 && n%100!=71 && n%100!=91 ? 1 : n%10==2 && n%100!=12 && n%100!=72 && '
65
+ 'n%100!=92 ? 2 : (n%10==3 || n%10==4 || n%10==9) && n%100!=13 && n%100!=14 && n%100!=19 && n%100!=73 && '
66
+ 'n%100!=74 && n%100!=79 && n%100!=93 && n%100!=94 && n%100!=99 ? 3 : n%1000000==0 ? 4 : 5)',
67
+ ),
68
+ # Bosnian
69
+ 'bs': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
70
+ # Catalan - From Pootle's PO's
71
+ 'ca': (2, '(n != 1)'),
72
+ # Chechen
73
+ # 'ce': (),
74
+ # Chamorro
75
+ # 'ch': (),
76
+ # Corsican
77
+ # 'co': (),
78
+ # Cree
79
+ # 'cr': (),
80
+ # Czech
81
+ 'cs': (3, '((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2)'),
82
+ # Church Slavic
83
+ # 'cu': (),
84
+ # Chuvash
85
+ 'cv': (1, '0'),
86
+ # Welsh
87
+ 'cy': (5, '(n==1 ? 1 : n==2 ? 2 : n==3 ? 3 : n==6 ? 4 : 0)'),
88
+ # Danish
89
+ 'da': (2, '(n != 1)'),
90
+ # German
91
+ 'de': (2, '(n != 1)'),
92
+ # Divehi
93
+ # 'dv': (),
94
+ # Dzongkha
95
+ 'dz': (1, '0'),
96
+ # Greek
97
+ 'el': (2, '(n != 1)'),
98
+ # English
99
+ 'en': (2, '(n != 1)'),
100
+ # Esperanto
101
+ 'eo': (2, '(n != 1)'),
102
+ # Spanish
103
+ 'es': (2, '(n != 1)'),
104
+ # Estonian
105
+ 'et': (2, '(n != 1)'),
106
+ # Basque - From Pootle's PO's
107
+ 'eu': (2, '(n != 1)'),
108
+ # Persian - From Pootle's PO's
109
+ 'fa': (1, '0'),
110
+ # Finnish
111
+ 'fi': (2, '(n != 1)'),
112
+ # French
113
+ 'fr': (2, '(n > 1)'),
114
+ # Friulian - From Pootle's PO's
115
+ 'fur': (2, '(n > 1)'),
116
+ # Irish
117
+ 'ga': (5, '(n==1 ? 0 : n==2 ? 1 : n>=3 && n<=6 ? 2 : n>=7 && n<=10 ? 3 : 4)'),
118
+ # Galician - From Pootle's PO's
119
+ 'gl': (2, '(n != 1)'),
120
+ # Hausa - From Pootle's PO's
121
+ 'ha': (2, '(n != 1)'),
122
+ # Hebrew
123
+ 'he': (2, '(n != 1)'),
124
+ # Hindi - From Pootle's PO's
125
+ 'hi': (2, '(n != 1)'),
126
+ # Croatian
127
+ 'hr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
128
+ # Hungarian
129
+ 'hu': (1, '0'),
130
+ # Armenian - From Pootle's PO's
131
+ 'hy': (1, '0'),
132
+ # Icelandic - From Pootle's PO's
133
+ 'is': (2, '(n%10==1 && n%100!=11 ? 0 : 1)'),
134
+ # Italian
135
+ 'it': (2, '(n != 1)'),
136
+ # Japanese
137
+ 'ja': (1, '0'),
138
+ # Georgian - From Pootle's PO's
139
+ 'ka': (1, '0'),
140
+ # Kongo - From Pootle's PO's
141
+ 'kg': (2, '(n != 1)'),
142
+ # Khmer - From Pootle's PO's
143
+ 'km': (1, '0'),
144
+ # Korean
145
+ 'ko': (1, '0'),
146
+ # Kurdish - From Pootle's PO's
147
+ 'ku': (2, '(n != 1)'),
148
+ # Lao - Another member of the Tai language family, like Thai.
149
+ 'lo': (1, '0'),
150
+ # Lithuanian
151
+ 'lt': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),
152
+ # Latvian
153
+ 'lv': (3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),
154
+ # Maltese - From Pootle's PO's
155
+ 'mt': (4, '(n==1 ? 0 : n==0 || ( n%100>=1 && n%100<=10) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),
156
+ # Norwegian Bokmål
157
+ 'nb': (2, '(n != 1)'),
158
+ # Dutch
159
+ 'nl': (2, '(n != 1)'),
160
+ # Norwegian Nynorsk
161
+ 'nn': (2, '(n != 1)'),
162
+ # Norwegian
163
+ 'no': (2, '(n != 1)'),
164
+ # Punjabi - From Pootle's PO's
165
+ 'pa': (2, '(n != 1)'),
166
+ # Polish
167
+ 'pl': (3, '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
168
+ # Portuguese
169
+ 'pt': (2, '(n != 1)'),
170
+ # Brazilian
171
+ 'pt_BR': (2, '(n > 1)'),
172
+ # Romanian - From Pootle's PO's
173
+ 'ro': (3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'),
174
+ # Russian
175
+ 'ru': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
176
+ # Slovak
177
+ 'sk': (3, '((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2)'),
178
+ # Slovenian
179
+ 'sl': (4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),
180
+ # Serbian - From Pootle's PO's
181
+ 'sr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
182
+ # Southern Sotho - From Pootle's PO's
183
+ 'st': (2, '(n != 1)'),
184
+ # Swedish
185
+ 'sv': (2, '(n != 1)'),
186
+ # Thai
187
+ 'th': (1, '0'),
188
+ # Turkish
189
+ 'tr': (1, '0'),
190
+ # Ukrainian
191
+ 'uk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
192
+ # Venda - From Pootle's PO's
193
+ 've': (2, '(n != 1)'),
194
+ # Vietnamese - From Pootle's PO's
195
+ 'vi': (1, '0'),
196
+ # Xhosa - From Pootle's PO's
197
+ 'xh': (2, '(n != 1)'),
198
+ # Chinese - From Pootle's PO's (modified)
199
+ 'zh': (1, '0'),
200
+ }
201
+
202
+
203
+ DEFAULT_PLURAL: tuple[int, str] = (2, '(n != 1)')
204
+
205
+
206
+ class _PluralTuple(tuple):
207
+ """A tuple with plural information."""
208
+
209
+ __slots__ = ()
210
+
211
+ @property
212
+ def num_plurals(self) -> int:
213
+ """The number of plurals used by the locale."""
214
+ return self[0]
215
+
216
+ @property
217
+ def plural_expr(self) -> str:
218
+ """The plural expression used by the locale."""
219
+ return self[1]
220
+
221
+ @property
222
+ def plural_forms(self) -> str:
223
+ """The plural expression used by the catalog or locale."""
224
+ return f'nplurals={self[0]}; plural={self[1]};'
225
+
226
+ def __str__(self) -> str:
227
+ return self.plural_forms
228
+
229
+
230
+ def get_plural(locale: Locale | str | None = None) -> _PluralTuple:
231
+ """A tuple with the information catalogs need to perform proper
232
+ pluralization. The first item of the tuple is the number of plural
233
+ forms, the second the plural expression.
234
+
235
+ :param locale: the `Locale` object or locale identifier. Defaults to the system character type locale.
236
+
237
+ >>> get_plural(locale='en')
238
+ (2, '(n != 1)')
239
+ >>> get_plural(locale='ga')
240
+ (5, '(n==1 ? 0 : n==2 ? 1 : n>=3 && n<=6 ? 2 : n>=7 && n<=10 ? 3 : 4)')
241
+
242
+ The object returned is a special tuple with additional members:
243
+
244
+ >>> tup = get_plural("ja")
245
+ >>> tup.num_plurals
246
+ 1
247
+ >>> tup.plural_expr
248
+ '0'
249
+ >>> tup.plural_forms
250
+ 'nplurals=1; plural=0;'
251
+
252
+ Converting the tuple into a string prints the plural forms for a
253
+ gettext catalog:
254
+
255
+ >>> str(tup)
256
+ 'nplurals=1; plural=0;'
257
+ """
258
+ locale = Locale.parse(locale or LC_CTYPE)
259
+ try:
260
+ tup = PLURALS[str(locale)]
261
+ except KeyError:
262
+ try:
263
+ tup = PLURALS[locale.language]
264
+ except KeyError:
265
+ tup = DEFAULT_PLURAL
266
+ return _PluralTuple(tup)
lib/python3.10/site-packages/babel/messages/pofile.py ADDED
@@ -0,0 +1,744 @@
1
+ """
2
+ babel.messages.pofile
3
+ ~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Reading and writing of files in the ``gettext`` PO (portable object)
6
+ format.
7
+
8
+ :copyright: (c) 2013-2025 by the Babel Team.
9
+ :license: BSD, see LICENSE for more details.
10
+ """
11
+ from __future__ import annotations
12
+
13
+ import os
14
+ import re
15
+ from collections.abc import Iterable
16
+ from typing import TYPE_CHECKING, Literal
17
+
18
+ from babel.core import Locale
19
+ from babel.messages.catalog import Catalog, Message
20
+ from babel.util import TextWrapper, _cmp
21
+
22
+ if TYPE_CHECKING:
23
+ from typing import IO, AnyStr
24
+
25
+ from _typeshed import SupportsWrite
26
+
27
+
28
+ def unescape(string: str) -> str:
29
+ r"""Reverse `escape` the given string.
30
+
31
+ >>> print(unescape('"Say:\\n \\"hello, world!\\"\\n"'))
32
+ Say:
33
+ "hello, world!"
34
+ <BLANKLINE>
35
+
36
+ :param string: the string to unescape
37
+ """
38
+ def replace_escapes(match):
39
+ m = match.group(1)
40
+ if m == 'n':
41
+ return '\n'
42
+ elif m == 't':
43
+ return '\t'
44
+ elif m == 'r':
45
+ return '\r'
46
+ # m is \ or "
47
+ return m
48
+ return re.compile(r'\\([\\trn"])').sub(replace_escapes, string[1:-1])
49
+
50
+
51
+ def denormalize(string: str) -> str:
52
+ r"""Reverse the normalization done by the `normalize` function.
53
+
54
+ >>> print(denormalize(r'''""
55
+ ... "Say:\n"
56
+ ... " \"hello, world!\"\n"'''))
57
+ Say:
58
+ "hello, world!"
59
+ <BLANKLINE>
60
+
61
+ >>> print(denormalize(r'''""
62
+ ... "Say:\n"
63
+ ... " \"Lorem ipsum dolor sit "
64
+ ... "amet, consectetur adipisicing"
65
+ ... " elit, \"\n"'''))
66
+ Say:
67
+ "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
68
+ <BLANKLINE>
69
+
70
+ :param string: the string to denormalize
71
+ """
72
+ if '\n' in string:
73
+ escaped_lines = string.splitlines()
74
+ if string.startswith('""'):
75
+ escaped_lines = escaped_lines[1:]
76
+ lines = map(unescape, escaped_lines)
77
+ return ''.join(lines)
78
+ else:
79
+ return unescape(string)
80
+
81
+
82
+ def _extract_locations(line: str) -> list[str]:
83
+ """Extract locations from location comments.
84
+
85
+ Locations are extracted while properly handling First Strong
86
+ Isolate (U+2068) and Pop Directional Isolate (U+2069), used by
87
+ gettext to enclose filenames with spaces and tabs in their names.
88
+ """
89
+ if "\u2068" not in line and "\u2069" not in line:
90
+ return line.lstrip().split()
91
+
92
+ locations = []
93
+ location = ""
94
+ in_filename = False
95
+ for c in line:
96
+ if c == "\u2068":
97
+ if in_filename:
98
+ raise ValueError("location comment contains more First Strong Isolate "
99
+ "characters than Pop Directional Isolate characters")
100
+ in_filename = True
101
+ continue
102
+ elif c == "\u2069":
103
+ if not in_filename:
104
+ raise ValueError("location comment contains more Pop Directional Isolate "
105
+ "characters, than First Strong Isolate characters")
106
+ in_filename = False
107
+ continue
108
+ elif c == " ":
109
+ if in_filename:
110
+ location += c
111
+ elif location:
112
+ locations.append(location)
113
+ location = ""
114
+ else:
115
+ location += c
116
+ else:
117
+ if location:
118
+ if in_filename:
119
+ raise ValueError("location comment contains more First Strong Isolate "
120
+ "characters, than Pop Directional Isolate characters")
121
+ locations.append(location)
122
+
123
+ return locations
124
+
125
+
126
+ class PoFileError(Exception):
127
+ """Exception thrown by PoParser when an invalid po file is encountered."""
128
+
129
+ def __init__(self, message: str, catalog: Catalog, line: str, lineno: int) -> None:
130
+ super().__init__(f'{message} on {lineno}')
131
+ self.catalog = catalog
132
+ self.line = line
133
+ self.lineno = lineno
134
+
135
+
136
+ class _NormalizedString:
137
+
138
+ def __init__(self, *args: str) -> None:
139
+ self._strs: list[str] = []
140
+ for arg in args:
141
+ self.append(arg)
142
+
143
+ def append(self, s: str) -> None:
144
+ self._strs.append(s.strip())
145
+
146
+ def denormalize(self) -> str:
147
+ return ''.join(map(unescape, self._strs))
148
+
149
+ def __bool__(self) -> bool:
150
+ return bool(self._strs)
151
+
152
+ def __repr__(self) -> str:
153
+ return os.linesep.join(self._strs)
154
+
155
+ def __cmp__(self, other: object) -> int:
156
+ if not other:
157
+ return 1
158
+
159
+ return _cmp(str(self), str(other))
160
+
161
+ def __gt__(self, other: object) -> bool:
162
+ return self.__cmp__(other) > 0
163
+
164
+ def __lt__(self, other: object) -> bool:
165
+ return self.__cmp__(other) < 0
166
+
167
+ def __ge__(self, other: object) -> bool:
168
+ return self.__cmp__(other) >= 0
169
+
170
+ def __le__(self, other: object) -> bool:
171
+ return self.__cmp__(other) <= 0
172
+
173
+ def __eq__(self, other: object) -> bool:
174
+ return self.__cmp__(other) == 0
175
+
176
+ def __ne__(self, other: object) -> bool:
177
+ return self.__cmp__(other) != 0
178
+
179
+
180
+ class PoFileParser:
181
+ """Support class to read messages from a ``gettext`` PO (portable object) file
182
+ and add them to a `Catalog`
183
+
184
+ See `read_po` for simple cases.
185
+ """
186
+
187
+ _keywords = [
188
+ 'msgid',
189
+ 'msgstr',
190
+ 'msgctxt',
191
+ 'msgid_plural',
192
+ ]
193
+
194
+ def __init__(self, catalog: Catalog, ignore_obsolete: bool = False, abort_invalid: bool = False) -> None:
195
+ self.catalog = catalog
196
+ self.ignore_obsolete = ignore_obsolete
197
+ self.counter = 0
198
+ self.offset = 0
199
+ self.abort_invalid = abort_invalid
200
+ self._reset_message_state()
201
+
202
+ def _reset_message_state(self) -> None:
203
+ self.messages = []
204
+ self.translations = []
205
+ self.locations = []
206
+ self.flags = []
207
+ self.user_comments = []
208
+ self.auto_comments = []
209
+ self.context = None
210
+ self.obsolete = False
211
+ self.in_msgid = False
212
+ self.in_msgstr = False
213
+ self.in_msgctxt = False
214
+
215
+ def _add_message(self) -> None:
216
+ """
217
+ Add a message to the catalog based on the current parser state and
218
+ clear the state ready to process the next message.
219
+ """
220
+ self.translations.sort()
221
+ if len(self.messages) > 1:
222
+ msgid = tuple(m.denormalize() for m in self.messages)
223
+ else:
224
+ msgid = self.messages[0].denormalize()
225
+ if isinstance(msgid, (list, tuple)):
226
+ string = ['' for _ in range(self.catalog.num_plurals)]
227
+ for idx, translation in self.translations:
228
+ if idx >= self.catalog.num_plurals:
229
+ self._invalid_pofile("", self.offset, "msg has more translations than num_plurals of catalog")
230
+ continue
231
+ string[idx] = translation.denormalize()
232
+ string = tuple(string)
233
+ else:
234
+ string = self.translations[0][1].denormalize()
235
+ msgctxt = self.context.denormalize() if self.context else None
236
+ message = Message(msgid, string, list(self.locations), set(self.flags),
237
+ self.auto_comments, self.user_comments, lineno=self.offset + 1,
238
+ context=msgctxt)
239
+ if self.obsolete:
240
+ if not self.ignore_obsolete:
241
+ self.catalog.obsolete[self.catalog._key_for(msgid, msgctxt)] = message
242
+ else:
243
+ self.catalog[msgid] = message
244
+ self.counter += 1
245
+ self._reset_message_state()
246
+
247
+ def _finish_current_message(self) -> None:
248
+ if self.messages:
249
+ if not self.translations:
250
+ self._invalid_pofile("", self.offset, f"missing msgstr for msgid '{self.messages[0].denormalize()}'")
251
+ self.translations.append([0, _NormalizedString("")])
252
+ self._add_message()
253
+
254
+ def _process_message_line(self, lineno, line, obsolete=False) -> None:
255
+ if line.startswith('"'):
256
+ self._process_string_continuation_line(line, lineno)
257
+ else:
258
+ self._process_keyword_line(lineno, line, obsolete)
259
+
260
+ def _process_keyword_line(self, lineno, line, obsolete=False) -> None:
261
+
262
+ for keyword in self._keywords:
263
+ try:
264
+ if line.startswith(keyword) and line[len(keyword)] in [' ', '[']:
265
+ arg = line[len(keyword):]
266
+ break
267
+ except IndexError:
268
+ self._invalid_pofile(line, lineno, "Keyword must be followed by a string")
269
+ else:
270
+ self._invalid_pofile(line, lineno, "Start of line didn't match any expected keyword.")
271
+ return
272
+
273
+ if keyword in ['msgid', 'msgctxt']:
274
+ self._finish_current_message()
275
+
276
+ self.obsolete = obsolete
277
+
278
+ # The line that has the msgid is stored as the offset of the msg
279
+ # should this be the msgctxt if it has one?
280
+ if keyword == 'msgid':
281
+ self.offset = lineno
282
+
283
+ if keyword in ['msgid', 'msgid_plural']:
284
+ self.in_msgctxt = False
285
+ self.in_msgid = True
286
+ self.messages.append(_NormalizedString(arg))
287
+
288
+ elif keyword == 'msgstr':
289
+ self.in_msgid = False
290
+ self.in_msgstr = True
291
+ if arg.startswith('['):
292
+ idx, msg = arg[1:].split(']', 1)
293
+ self.translations.append([int(idx), _NormalizedString(msg)])
294
+ else:
295
+ self.translations.append([0, _NormalizedString(arg)])
296
+
297
+ elif keyword == 'msgctxt':
298
+ self.in_msgctxt = True
299
+ self.context = _NormalizedString(arg)
300
+
301
+ def _process_string_continuation_line(self, line, lineno) -> None:
302
+ if self.in_msgid:
303
+ s = self.messages[-1]
304
+ elif self.in_msgstr:
305
+ s = self.translations[-1][1]
306
+ elif self.in_msgctxt:
307
+ s = self.context
308
+ else:
309
+ self._invalid_pofile(line, lineno, "Got line starting with \" but not in msgid, msgstr or msgctxt")
310
+ return
311
+ s.append(line)
312
+
313
+ def _process_comment(self, line) -> None:
314
+
315
+ self._finish_current_message()
316
+
317
+ if line[1:].startswith(':'):
318
+ for location in _extract_locations(line[2:]):
319
+ pos = location.rfind(':')
320
+ if pos >= 0:
321
+ try:
322
+ lineno = int(location[pos + 1:])
323
+ except ValueError:
324
+ continue
325
+ self.locations.append((location[:pos], lineno))
326
+ else:
327
+ self.locations.append((location, None))
328
+ elif line[1:].startswith(','):
329
+ for flag in line[2:].lstrip().split(','):
330
+ self.flags.append(flag.strip())
331
+ elif line[1:].startswith('.'):
332
+ # These are called auto-comments
333
+ comment = line[2:].strip()
334
+ if comment: # Just check that we're not adding empty comments
335
+ self.auto_comments.append(comment)
336
+ else:
337
+ # These are called user comments
338
+ self.user_comments.append(line[1:].strip())
339
+
340
+ def parse(self, fileobj: IO[AnyStr] | Iterable[AnyStr]) -> None:
341
+ """
342
+ Reads from the file-like object `fileobj` and adds any po file
343
+ units found in it to the `Catalog` supplied to the constructor.
344
+ """
345
+
346
+ for lineno, line in enumerate(fileobj):
347
+ line = line.strip()
348
+ if not isinstance(line, str):
349
+ line = line.decode(self.catalog.charset)
350
+ if not line:
351
+ continue
352
+ if line.startswith('#'):
353
+ if line[1:].startswith('~'):
354
+ self._process_message_line(lineno, line[2:].lstrip(), obsolete=True)
355
+ else:
356
+ try:
357
+ self._process_comment(line)
358
+ except ValueError as exc:
359
+ self._invalid_pofile(line, lineno, str(exc))
360
+ else:
361
+ self._process_message_line(lineno, line)
362
+
363
+ self._finish_current_message()
364
+
365
+ # No actual messages found, but there was some info in comments, from which
366
+ # we'll construct an empty header message
367
+ if not self.counter and (self.flags or self.user_comments or self.auto_comments):
368
+ self.messages.append(_NormalizedString('""'))
369
+ self.translations.append([0, _NormalizedString('""')])
370
+ self._add_message()
371
+
372
+ def _invalid_pofile(self, line, lineno, msg) -> None:
373
+ assert isinstance(line, str)
374
+ if self.abort_invalid:
375
+ raise PoFileError(msg, self.catalog, line, lineno)
376
+ print("WARNING:", msg)
377
+ print(f"WARNING: Problem on line {lineno + 1}: {line!r}")
378
+
379
+
380
+ def read_po(
381
+ fileobj: IO[AnyStr] | Iterable[AnyStr],
382
+ locale: Locale | str | None = None,
383
+ domain: str | None = None,
384
+ ignore_obsolete: bool = False,
385
+ charset: str | None = None,
386
+ abort_invalid: bool = False,
387
+ ) -> Catalog:
388
+ """Read messages from a ``gettext`` PO (portable object) file from the given
389
+ file-like object (or an iterable of lines) and return a `Catalog`.
390
+
391
+ >>> from datetime import datetime
392
+ >>> from io import StringIO
393
+ >>> buf = StringIO('''
394
+ ... #: main.py:1
395
+ ... #, fuzzy, python-format
396
+ ... msgid "foo %(name)s"
397
+ ... msgstr "quux %(name)s"
398
+ ...
399
+ ... # A user comment
400
+ ... #. An auto comment
401
+ ... #: main.py:3
402
+ ... msgid "bar"
403
+ ... msgid_plural "baz"
404
+ ... msgstr[0] "bar"
405
+ ... msgstr[1] "baaz"
406
+ ... ''')
407
+ >>> catalog = read_po(buf)
408
+ >>> catalog.revision_date = datetime(2007, 4, 1)
409
+
410
+ >>> for message in catalog:
411
+ ... if message.id:
412
+ ... print((message.id, message.string))
413
+ ... print(' ', (message.locations, sorted(list(message.flags))))
414
+ ... print(' ', (message.user_comments, message.auto_comments))
415
+ (u'foo %(name)s', u'quux %(name)s')
416
+ ([(u'main.py', 1)], [u'fuzzy', u'python-format'])
417
+ ([], [])
418
+ ((u'bar', u'baz'), (u'bar', u'baaz'))
419
+ ([(u'main.py', 3)], [])
420
+ ([u'A user comment'], [u'An auto comment'])
421
+
422
+ .. versionadded:: 1.0
423
+ Added support for explicit charset argument.
424
+
425
+ :param fileobj: the file-like object (or iterable of lines) to read the PO file from
426
+ :param locale: the locale identifier or `Locale` object, or `None`
427
+ if the catalog is not bound to a locale (which basically
428
+ means it's a template)
429
+ :param domain: the message domain
430
+ :param ignore_obsolete: whether to ignore obsolete messages in the input
431
+ :param charset: the character set of the catalog.
432
+ :param abort_invalid: abort read if po file is invalid
433
+ """
434
+ catalog = Catalog(locale=locale, domain=domain, charset=charset)
435
+ parser = PoFileParser(catalog, ignore_obsolete, abort_invalid=abort_invalid)
436
+ parser.parse(fileobj)
437
+ return catalog
438
+
439
+
440
+ WORD_SEP = re.compile('('
441
+ r'\s+|' # any whitespace
442
+ r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
443
+ r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash
444
+ ')')
445
+
446
+
447
+ def escape(string: str) -> str:
448
+ r"""Escape the given string so that it can be included in double-quoted
449
+ strings in ``PO`` files.
450
+
451
+ >>> escape('''Say:
452
+ ... "hello, world!"
453
+ ... ''')
454
+ '"Say:\\n \\"hello, world!\\"\\n"'
455
+
456
+ :param string: the string to escape
457
+ """
458
+ return '"%s"' % string.replace('\\', '\\\\') \
459
+ .replace('\t', '\\t') \
460
+ .replace('\r', '\\r') \
461
+ .replace('\n', '\\n') \
462
+ .replace('\"', '\\"')
463
+
464
+
465
+ def normalize(string: str, prefix: str = '', width: int = 76) -> str:
466
+ r"""Convert a string into a format that is appropriate for .po files.
467
+
468
+ >>> print(normalize('''Say:
469
+ ... "hello, world!"
470
+ ... ''', width=None))
471
+ ""
472
+ "Say:\n"
473
+ " \"hello, world!\"\n"
474
+
475
+ >>> print(normalize('''Say:
476
+ ... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
477
+ ... ''', width=32))
478
+ ""
479
+ "Say:\n"
480
+ " \"Lorem ipsum dolor sit "
481
+ "amet, consectetur adipisicing"
482
+ " elit, \"\n"
483
+
484
+ :param string: the string to normalize
485
+ :param prefix: a string that should be prepended to every line
486
+ :param width: the maximum line width; use `None`, 0, or a negative number
487
+ to completely disable line wrapping
488
+ """
489
+ if width and width > 0:
490
+ prefixlen = len(prefix)
491
+ lines = []
492
+ for line in string.splitlines(True):
493
+ if len(escape(line)) + prefixlen > width:
494
+ chunks = WORD_SEP.split(line)
495
+ chunks.reverse()
496
+ while chunks:
497
+ buf = []
498
+ size = 2
499
+ while chunks:
500
+ length = len(escape(chunks[-1])) - 2 + prefixlen
501
+ if size + length < width:
502
+ buf.append(chunks.pop())
503
+ size += length
504
+ else:
505
+ if not buf:
506
+ # handle long chunks by putting them on a
507
+ # separate line
508
+ buf.append(chunks.pop())
509
+ break
510
+ lines.append(''.join(buf))
511
+ else:
512
+ lines.append(line)
513
+ else:
514
+ lines = string.splitlines(True)
515
+
516
+ if len(lines) <= 1:
517
+ return escape(string)
518
+
519
+ # Remove empty trailing line
520
+ if lines and not lines[-1]:
521
+ del lines[-1]
522
+ lines[-1] += '\n'
523
+ return '""\n' + '\n'.join([(prefix + escape(line)) for line in lines])
524
+
525
+
526
+ def _enclose_filename_if_necessary(filename: str) -> str:
527
+ """Enclose filenames which include white spaces or tabs.
528
+
529
+ Do the same as gettext and enclose filenames which contain white
530
+ spaces or tabs with First Strong Isolate (U+2068) and Pop
531
+ Directional Isolate (U+2069).
532
+ """
533
+ if " " not in filename and "\t" not in filename:
534
+ return filename
535
+
536
+ if not filename.startswith("\u2068"):
537
+ filename = "\u2068" + filename
538
+ if not filename.endswith("\u2069"):
539
+ filename += "\u2069"
540
+ return filename
541
+
542
+
543
+ def write_po(
544
+ fileobj: SupportsWrite[bytes],
545
+ catalog: Catalog,
546
+ width: int = 76,
547
+ no_location: bool = False,
548
+ omit_header: bool = False,
549
+ sort_output: bool = False,
550
+ sort_by_file: bool = False,
551
+ ignore_obsolete: bool = False,
552
+ include_previous: bool = False,
553
+ include_lineno: bool = True,
554
+ ) -> None:
555
+ r"""Write a ``gettext`` PO (portable object) template file for a given
556
+ message catalog to the provided file-like object.
557
+
558
+ >>> catalog = Catalog()
559
+ >>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
560
+ ... flags=('fuzzy',))
561
+ <Message...>
562
+ >>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
563
+ <Message...>
564
+ >>> from io import BytesIO
565
+ >>> buf = BytesIO()
566
+ >>> write_po(buf, catalog, omit_header=True)
567
+ >>> print(buf.getvalue().decode("utf8"))
568
+ #: main.py:1
569
+ #, fuzzy, python-format
570
+ msgid "foo %(name)s"
571
+ msgstr ""
572
+ <BLANKLINE>
573
+ #: main.py:3
574
+ msgid "bar"
575
+ msgid_plural "baz"
576
+ msgstr[0] ""
577
+ msgstr[1] ""
578
+ <BLANKLINE>
579
+ <BLANKLINE>
580
+
581
+ :param fileobj: the file-like object to write to
582
+ :param catalog: the `Catalog` instance
583
+ :param width: the maximum line width for the generated output; use `None`,
584
+ 0, or a negative number to completely disable line wrapping
585
+ :param no_location: do not emit a location comment for every message
586
+ :param omit_header: do not include the ``msgid ""`` entry at the top of the
587
+ output
588
+ :param sort_output: whether to sort the messages in the output by msgid
589
+ :param sort_by_file: whether to sort the messages in the output by their
590
+ locations
591
+ :param ignore_obsolete: whether to ignore obsolete messages and not include
592
+ them in the output; by default they are included as
593
+ comments
594
+ :param include_previous: include the old msgid as a comment when
595
+ updating the catalog
596
+ :param include_lineno: include line number in the location comment
597
+ """
598
+
599
+ sort_by = None
600
+ if sort_output:
601
+ sort_by = "message"
602
+ elif sort_by_file:
603
+ sort_by = "location"
604
+
605
+ for line in generate_po(
606
+ catalog,
607
+ ignore_obsolete=ignore_obsolete,
608
+ include_lineno=include_lineno,
609
+ include_previous=include_previous,
610
+ no_location=no_location,
611
+ omit_header=omit_header,
612
+ sort_by=sort_by,
613
+ width=width,
614
+ ):
615
+ if isinstance(line, str):
616
+ line = line.encode(catalog.charset, 'backslashreplace')
617
+ fileobj.write(line)
618
+
619
+
620
+ def generate_po(
621
+ catalog: Catalog,
622
+ *,
623
+ ignore_obsolete: bool = False,
624
+ include_lineno: bool = True,
625
+ include_previous: bool = False,
626
+ no_location: bool = False,
627
+ omit_header: bool = False,
628
+ sort_by: Literal["message", "location"] | None = None,
629
+ width: int = 76,
630
+ ) -> Iterable[str]:
631
+ r"""Yield text strings representing a ``gettext`` PO (portable object) file.
632
+
633
+ See `write_po()` for a more detailed description.
634
+ """
635
+ # xgettext always wraps comments even if --no-wrap is passed;
636
+ # provide the same behaviour
637
+ comment_width = width if width and width > 0 else 76
638
+
639
+ comment_wrapper = TextWrapper(width=comment_width, break_long_words=False)
640
+ header_wrapper = TextWrapper(width=width, subsequent_indent="# ", break_long_words=False)
641
+
642
+ def _format_comment(comment, prefix=''):
643
+ for line in comment_wrapper.wrap(comment):
644
+ yield f"#{prefix} {line.strip()}\n"
645
+
646
+ def _format_message(message, prefix=''):
647
+ if isinstance(message.id, (list, tuple)):
648
+ if message.context:
649
+ yield f"{prefix}msgctxt {normalize(message.context, prefix=prefix, width=width)}\n"
650
+ yield f"{prefix}msgid {normalize(message.id[0], prefix=prefix, width=width)}\n"
651
+ yield f"{prefix}msgid_plural {normalize(message.id[1], prefix=prefix, width=width)}\n"
652
+
653
+ for idx in range(catalog.num_plurals):
654
+ try:
655
+ string = message.string[idx]
656
+ except IndexError:
657
+ string = ''
658
+ yield f"{prefix}msgstr[{idx:d}] {normalize(string, prefix=prefix, width=width)}\n"
659
+ else:
660
+ if message.context:
661
+ yield f"{prefix}msgctxt {normalize(message.context, prefix=prefix, width=width)}\n"
662
+ yield f"{prefix}msgid {normalize(message.id, prefix=prefix, width=width)}\n"
663
+ yield f"{prefix}msgstr {normalize(message.string or '', prefix=prefix, width=width)}\n"
664
+
665
+ for message in _sort_messages(catalog, sort_by=sort_by):
666
+ if not message.id: # This is the header "message"
667
+ if omit_header:
668
+ continue
669
+ comment_header = catalog.header_comment
670
+ if width and width > 0:
671
+ lines = []
672
+ for line in comment_header.splitlines():
673
+ lines += header_wrapper.wrap(line)
674
+ comment_header = '\n'.join(lines)
675
+ yield f"{comment_header}\n"
676
+
677
+ for comment in message.user_comments:
678
+ yield from _format_comment(comment)
679
+ for comment in message.auto_comments:
680
+ yield from _format_comment(comment, prefix='.')
681
+
682
+ if not no_location:
683
+ locs = []
684
+
685
+ # sort locations by filename and lineno.
686
+ # if there's no <int> as lineno, use `-1`.
687
+ # if no sorting possible, leave unsorted.
688
+ # (see issue #606)
689
+ try:
690
+ locations = sorted(message.locations,
691
+ key=lambda x: (x[0], isinstance(x[1], int) and x[1] or -1))
692
+ except TypeError: # e.g. "TypeError: unorderable types: NoneType() < int()"
693
+ locations = message.locations
694
+
695
+ for filename, lineno in locations:
696
+ location = filename.replace(os.sep, '/')
697
+ location = _enclose_filename_if_necessary(location)
698
+ if lineno and include_lineno:
699
+ location = f"{location}:{lineno:d}"
700
+ if location not in locs:
701
+ locs.append(location)
702
+ yield from _format_comment(' '.join(locs), prefix=':')
703
+ if message.flags:
704
+ yield f"#{', '.join(['', *sorted(message.flags)])}\n"
705
+
706
+ if message.previous_id and include_previous:
707
+ yield from _format_comment(
708
+ f'msgid {normalize(message.previous_id[0], width=width)}',
709
+ prefix='|',
710
+ )
711
+ if len(message.previous_id) > 1:
712
+ norm_previous_id = normalize(message.previous_id[1], width=width)
713
+ yield from _format_comment(f'msgid_plural {norm_previous_id}', prefix='|')
714
+
715
+ yield from _format_message(message)
716
+ yield '\n'
717
+
718
+ if not ignore_obsolete:
719
+ for message in _sort_messages(
720
+ catalog.obsolete.values(),
721
+ sort_by=sort_by,
722
+ ):
723
+ for comment in message.user_comments:
724
+ yield from _format_comment(comment)
725
+ yield from _format_message(message, prefix='#~ ')
726
+ yield '\n'
727
+
728
+
729
+ def _sort_messages(messages: Iterable[Message], sort_by: Literal["message", "location"] | None) -> list[Message]:
730
+ """
731
+ Sort the given message iterable by the given criteria.
732
+
733
+ Always returns a list.
734
+
735
+ :param messages: An iterable of Messages.
736
+ :param sort_by: Sort by which criteria? Options are `message` and `location`.
737
+ :return: list[Message]
738
+ """
739
+ messages = list(messages)
740
+ if sort_by == "message":
741
+ messages.sort()
742
+ elif sort_by == "location":
743
+ messages.sort(key=lambda m: m.locations)
744
+ return messages
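A quick round-trip sketch using the read_po and write_po functions defined above; the PO fragment, locale, and buffer names below are illustrative only and not part of the file itself.

    from io import BytesIO, StringIO
    from babel.messages.pofile import read_po, write_po

    # Parse a tiny catalog from an in-memory PO fragment.
    po_text = '#: main.py:1\nmsgid "hello"\nmsgstr "hallo"\n'
    catalog = read_po(StringIO(po_text), locale='de')

    # Serialize it back out; write_po expects a binary file-like object.
    buf = BytesIO()
    write_po(buf, catalog, omit_header=True)
    print(buf.getvalue().decode('utf-8'))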
lib/python3.10/site-packages/babel/messages/setuptools_frontend.py ADDED
@@ -0,0 +1,108 @@
1
+ from __future__ import annotations
2
+
3
+ from babel.messages import frontend
4
+
5
+ try:
6
+ # See: https://setuptools.pypa.io/en/latest/deprecated/distutils-legacy.html
7
+ from setuptools import Command
8
+
9
+ try:
10
+ from setuptools.errors import BaseError, OptionError, SetupError
11
+ except ImportError: # Error aliases only added in setuptools 59 (2021-11).
12
+ OptionError = SetupError = BaseError = Exception
13
+
14
+ except ImportError:
15
+ from distutils.cmd import Command
16
+ from distutils.errors import DistutilsSetupError as SetupError
17
+
18
+
19
+ def check_message_extractors(dist, name, value):
20
+ """Validate the ``message_extractors`` keyword argument to ``setup()``.
21
+
22
+ :param dist: the distutils/setuptools ``Distribution`` object
23
+ :param name: the name of the keyword argument (should always be
24
+ "message_extractors")
25
+ :param value: the value of the keyword argument
26
+ :raise `DistutilsSetupError`: if the value is not valid
27
+ """
28
+ assert name == "message_extractors"
29
+ if not isinstance(value, dict):
30
+ raise SetupError(
31
+ 'the value of the "message_extractors" parameter must be a dictionary',
32
+ )
33
+
34
+
35
+ class compile_catalog(frontend.CompileCatalog, Command):
36
+ """Catalog compilation command for use in ``setup.py`` scripts.
37
+
38
+ If correctly installed, this command is available to Setuptools-using
39
+ setup scripts automatically. For projects using plain old ``distutils``,
40
+ the command needs to be registered explicitly in ``setup.py``::
41
+
42
+ from babel.messages.setuptools_frontend import compile_catalog
43
+
44
+ setup(
45
+ ...
46
+ cmdclass = {'compile_catalog': compile_catalog}
47
+ )
48
+
49
+ .. versionadded:: 0.9
50
+ """
51
+
52
+
53
+ class extract_messages(frontend.ExtractMessages, Command):
54
+ """Message extraction command for use in ``setup.py`` scripts.
55
+
56
+ If correctly installed, this command is available to Setuptools-using
57
+ setup scripts automatically. For projects using plain old ``distutils``,
58
+ the command needs to be registered explicitly in ``setup.py``::
59
+
60
+ from babel.messages.setuptools_frontend import extract_messages
61
+
62
+ setup(
63
+ ...
64
+ cmdclass = {'extract_messages': extract_messages}
65
+ )
66
+ """
67
+
68
+
69
+ class init_catalog(frontend.InitCatalog, Command):
70
+ """New catalog initialization command for use in ``setup.py`` scripts.
71
+
72
+ If correctly installed, this command is available to Setuptools-using
73
+ setup scripts automatically. For projects using plain old ``distutils``,
74
+ the command needs to be registered explicitly in ``setup.py``::
75
+
76
+ from babel.messages.setuptools_frontend import init_catalog
77
+
78
+ setup(
79
+ ...
80
+ cmdclass = {'init_catalog': init_catalog}
81
+ )
82
+ """
83
+
84
+
85
+ class update_catalog(frontend.UpdateCatalog, Command):
86
+ """Catalog merging command for use in ``setup.py`` scripts.
87
+
88
+ If correctly installed, this command is available to Setuptools-using
89
+ setup scripts automatically. For projects using plain old ``distutils``,
90
+ the command needs to be registered explicitly in ``setup.py``::
91
+
92
+ from babel.messages.setuptools_frontend import update_catalog
93
+
94
+ setup(
95
+ ...
96
+ cmdclass = {'update_catalog': update_catalog}
97
+ )
98
+
99
+ .. versionadded:: 0.9
100
+ """
101
+
102
+
103
+ COMMANDS = {
104
+ "compile_catalog": compile_catalog,
105
+ "extract_messages": extract_messages,
106
+ "init_catalog": init_catalog,
107
+ "update_catalog": update_catalog,
108
+ }
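The class docstrings above show per-command registration; as a condensed sketch, the COMMANDS mapping defined at the end of the file can be passed straight to cmdclass (the project name and version here are made up):

    from setuptools import setup
    from babel.messages.setuptools_frontend import COMMANDS

    setup(
        name="example-project",  # hypothetical project name
        version="0.1",
        # Registers compile_catalog, extract_messages, init_catalog, update_catalog.
        cmdclass=COMMANDS,
    )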
lib/python3.10/site-packages/numba/__init__.py ADDED
@@ -0,0 +1,253 @@
1
+ """
2
+ Expose top-level symbols that are safe for import *
3
+ """
4
+
5
+ import platform
6
+ import re
7
+ import sys
8
+ import warnings
9
+
10
+
11
+ # ---------------------- WARNING WARNING WARNING ----------------------------
12
+ # THIS MUST RUN FIRST, DO NOT MOVE... SEE DOCSTRING IN _ensure_critical_deps
13
+ def _ensure_critical_deps():
14
+ """
15
+ Make sure the Python, NumPy and SciPy present are supported versions.
16
+ This has to be done _before_ importing anything from Numba such that
17
+ incompatible versions can be reported to the user. If this occurs _after_
18
+ importing things from Numba and there's an issue in e.g. a Numba c-ext, a
19
+ SystemError might have occurred which prevents reporting the likely cause of
20
+ the problem (incompatible versions of critical dependencies).
21
+ """
22
+ #NOTE THIS CODE SHOULD NOT IMPORT ANYTHING FROM NUMBA!
23
+
24
+ def extract_version(mod):
25
+ return tuple(map(int, mod.__version__.split('.')[:2]))
26
+
27
+ PYVERSION = sys.version_info[:2]
28
+
29
+ if PYVERSION < (3, 10):
30
+ msg = ("Numba needs Python 3.10 or greater. Got Python "
31
+ f"{PYVERSION[0]}.{PYVERSION[1]}.")
32
+ raise ImportError(msg)
33
+
34
+ import numpy as np
35
+ numpy_version = extract_version(np)
36
+
37
+ if numpy_version < (1, 24):
38
+ msg = (f"Numba needs NumPy 1.24 or greater. Got NumPy "
39
+ f"{numpy_version[0]}.{numpy_version[1]}.")
40
+ raise ImportError(msg)
41
+
42
+ if numpy_version > (2, 1):
43
+ msg = (f"Numba needs NumPy 2.1 or less. Got NumPy "
44
+ f"{numpy_version[0]}.{numpy_version[1]}.")
45
+ raise ImportError(msg)
46
+
47
+ try:
48
+ import scipy
49
+ except ImportError:
50
+ pass
51
+ else:
52
+ sp_version = extract_version(scipy)
53
+ if sp_version < (1, 0):
54
+ msg = ("Numba requires SciPy version 1.0 or greater. Got SciPy "
55
+ f"{scipy.__version__}.")
56
+ raise ImportError(msg)
57
+
58
+
59
+ _ensure_critical_deps()
60
+ # END DO NOT MOVE
61
+ # ---------------------- WARNING WARNING WARNING ----------------------------
62
+
63
+
64
+ from ._version import get_versions
65
+ from numba.misc.init_utils import generate_version_info
66
+
67
+ __version__ = get_versions()['version']
68
+ version_info = generate_version_info(__version__)
69
+ del get_versions
70
+ del generate_version_info
71
+
72
+
73
+ from numba.core import config
74
+ from numba.core import types, errors
75
+
76
+ # Re-export typeof
77
+ from numba.misc.special import (
78
+ typeof, prange, pndindex, gdb, gdb_breakpoint, gdb_init,
79
+ literally, literal_unroll,
80
+ )
81
+
82
+ # Re-export error classes
83
+ from numba.core.errors import *
84
+
85
+ # Re-export types itself
86
+ import numba.core.types as types
87
+
88
+ # Re-export all type names
89
+ from numba.core.types import *
90
+
91
+ # Re-export decorators
92
+ from numba.core.decorators import (cfunc, jit, njit, stencil,
93
+ jit_module)
94
+
95
+ # Re-export vectorize decorators and the thread layer querying function
96
+ from numba.np.ufunc import (vectorize, guvectorize, threading_layer,
97
+ get_num_threads, set_num_threads,
98
+ set_parallel_chunksize, get_parallel_chunksize,
99
+ get_thread_id)
100
+
101
+ # Re-export Numpy helpers
102
+ from numba.np.numpy_support import carray, farray, from_dtype
103
+
104
+ # Re-export experimental
105
+ from numba import experimental
106
+
107
+ # Initialize withcontexts
108
+ import numba.core.withcontexts
109
+ from numba.core.withcontexts import objmode_context as objmode
110
+ from numba.core.withcontexts import parallel_chunksize
111
+
112
+ # Initialize target extensions
113
+ import numba.core.target_extension
114
+
115
+ # Initialize typed containers
116
+ import numba.typed
117
+
118
+ # Keep this for backward compatibility.
119
+ def test(argv, **kwds):
120
+ # To speed up the import time, avoid importing `unittest` and other test
121
+ # dependencies unless the user is actually trying to run tests.
122
+ from numba.testing import _runtests as runtests
123
+ return runtests.main(argv, **kwds)
124
+
125
+ __all__ = """
126
+ cfunc
127
+ from_dtype
128
+ guvectorize
129
+ jit
130
+ experimental
131
+ njit
132
+ stencil
133
+ jit_module
134
+ typeof
135
+ prange
136
+ gdb
137
+ gdb_breakpoint
138
+ gdb_init
139
+ vectorize
140
+ objmode
141
+ literal_unroll
142
+ get_num_threads
143
+ set_num_threads
144
+ set_parallel_chunksize
145
+ get_parallel_chunksize
146
+ parallel_chunksize
147
+ """.split() + types.__all__ + errors.__all__
148
+
149
+
150
+ _min_llvmlite_version = (0, 44, 0)
151
+ _min_llvm_version = (14, 0, 0)
152
+
153
+ def _ensure_llvm():
154
+ """
155
+ Make sure llvmlite is operational.
156
+ """
157
+ import warnings
158
+ import llvmlite
159
+
160
+ # Only look at the major, minor and bugfix version numbers.
161
+ # Ignore anything else
162
+ regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
163
+ m = regex.match(llvmlite.__version__)
164
+ if m:
165
+ ver = tuple(map(int, m.groups()))
166
+ if ver < _min_llvmlite_version:
167
+ msg = ("Numba requires at least version %d.%d.%d of llvmlite.\n"
168
+ "Installed version is %s.\n"
169
+ "Please update llvmlite." %
170
+ (_min_llvmlite_version + (llvmlite.__version__,)))
171
+ raise ImportError(msg)
172
+ else:
173
+ # Not matching?
174
+ warnings.warn("llvmlite version format not recognized!")
175
+
176
+ from llvmlite.binding import llvm_version_info, check_jit_execution
177
+
178
+ if llvm_version_info < _min_llvm_version:
179
+ msg = ("Numba requires at least version %d.%d.%d of LLVM.\n"
180
+ "Installed llvmlite is built against version %d.%d.%d.\n"
181
+ "Please update llvmlite." %
182
+ (_min_llvm_version + llvm_version_info))
183
+ raise ImportError(msg)
184
+
185
+ check_jit_execution()
186
+
187
+
188
+ def _try_enable_svml():
189
+ """
190
+ Tries to enable SVML if configuration permits use and the library is found.
191
+ """
192
+ if not config.DISABLE_INTEL_SVML:
193
+ try:
194
+ if sys.platform.startswith('linux'):
195
+ llvmlite.binding.load_library_permanently("libsvml.so")
196
+ elif sys.platform.startswith('darwin'):
197
+ llvmlite.binding.load_library_permanently("libsvml.dylib")
198
+ elif sys.platform.startswith('win'):
199
+ llvmlite.binding.load_library_permanently("svml_dispmd")
200
+ else:
201
+ return False
202
+ # The SVML library is loaded, therefore SVML *could* be supported.
203
+ # Now see if LLVM has been compiled with the SVML support patch.
204
+ # If llvmlite has the checking function `has_svml` and it returns
205
+ # True, then LLVM was compiled with SVML support and the setup
206
+ # for SVML can proceed. We err on the side of caution and if the
207
+ # checking function is missing, regardless of that being fine for
208
+ # most 0.23.{0,1} llvmlite instances (i.e. conda or pip installed),
209
+ # we assume that SVML was not compiled in. llvmlite 0.23.2 is a
210
+ # bugfix release with the checking function present that will always
211
+ # produce correct behaviour. For context see: #3006.
212
+ try:
213
+ if not getattr(llvmlite.binding.targets, "has_svml")():
214
+ # has detection function, but no svml compiled in, therefore
215
+ # disable SVML
216
+ return False
217
+ except AttributeError:
218
+ if platform.machine() == 'x86_64' and config.DEBUG:
219
+ msg = ("SVML was found but llvmlite >= 0.23.2 is "
220
+ "needed to support it.")
221
+ warnings.warn(msg)
222
+ # does not have detection function, cannot detect reliably,
223
+ # disable SVML.
224
+ return False
225
+
226
+ # All is well, detection function present and reports SVML is
227
+ # compiled in, set the vector library to SVML.
228
+ llvmlite.binding.set_option('SVML', '-vector-library=SVML')
229
+ return True
230
+ except:
231
+ if platform.machine() == 'x86_64' and config.DEBUG:
232
+ warnings.warn("SVML was not found/could not be loaded.")
233
+ return False
234
+
235
+ _ensure_llvm()
236
+
237
+ # we know llvmlite is working as the above tests passed, import it now as SVML
238
+ # needs to mutate runtime options (sets the `-vector-library`).
239
+ import llvmlite
240
+
241
+ """
242
+ Is set to True if Intel SVML is in use.
243
+ """
244
+ config.USING_SVML = _try_enable_svml()
245
+
246
+
247
+ # ---------------------- WARNING WARNING WARNING ----------------------------
248
+ # The following imports occur below here (SVML init) because somewhere in their
249
+ # import sequence they have a `@njit` wrapped function. This triggers too early
250
+ # a bind to the underlying LLVM libraries which then irretrievably sets the LLVM
251
+ # SVML state to "no SVML". See https://github.com/numba/numba/issues/4689 for
252
+ # context.
253
+ # ---------------------- WARNING WARNING WARNING ----------------------------
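A minimal sketch exercising the njit and prange symbols re-exported by this __init__; the function name and array size are arbitrary:

    import numpy as np
    from numba import njit, prange

    @njit(parallel=True)
    def total(values):
        # Parallel reduction over a 1-D float array.
        acc = 0.0
        for i in prange(values.shape[0]):
            acc += values[i]
        return acc

    print(total(np.arange(1_000, dtype=np.float64)))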
lib/python3.10/site-packages/numba/__main__.py ADDED
@@ -0,0 +1,6 @@
1
+ """Expose Numba command via ``python -m numba``."""
2
+ import sys
3
+ from numba.misc.numba_entry import main
4
+
5
+ if __name__ == '__main__':
6
+ sys.exit(main())
lib/python3.10/site-packages/numba/_arraystruct.h ADDED
@@ -0,0 +1,21 @@
1
+ #ifndef NUMBA_ARYSTRUCT_H_
2
+ #define NUMBA_ARYSTRUCT_H_
3
+ /*
4
+ * Fill in the *arystruct* with information from the Numpy array *obj*.
5
+ * *arystruct*'s layout is defined in numba.targets.arrayobj (look
6
+ * for the ArrayTemplate class).
7
+ */
8
+
9
+ typedef struct {
10
+ void *meminfo; /* see _nrt_python.c and nrt.h in numba/core/runtime */
11
+ PyObject *parent;
12
+ npy_intp nitems;
13
+ npy_intp itemsize;
14
+ void *data;
15
+
16
+ npy_intp shape_and_strides[];
17
+ } arystruct_t;
18
+
19
+
20
+ #endif /* NUMBA_ARYSTRUCT_H_ */
21
+
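For orientation, a ctypes sketch mirroring only the fixed-size head of arystruct_t as declared above; the trailing shape_and_strides flexible array is omitted, and the field types assume a 64-bit build where npy_intp is a signed 64-bit integer:

    import ctypes

    class AryStructHead(ctypes.Structure):
        # Leading members of arystruct_t; illustrative only, not an official API.
        _fields_ = [
            ("meminfo", ctypes.c_void_p),
            ("parent", ctypes.c_void_p),   # PyObject *
            ("nitems", ctypes.c_int64),    # npy_intp on 64-bit platforms
            ("itemsize", ctypes.c_int64),
            ("data", ctypes.c_void_p),
        ]

    print(ctypes.sizeof(AryStructHead))  # 40 bytes on a typical 64-bit build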
lib/python3.10/site-packages/numba/_devicearray.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (46.3 kB).
lib/python3.10/site-packages/numba/_devicearray.h ADDED
@@ -0,0 +1,25 @@
1
+ #ifndef NUMBA_DEVICEARRAY_H_
2
+ #define NUMBA_DEVICEARRAY_H_
3
+
4
+ #ifdef __cplusplus
5
+ extern "C" {
6
+ #endif
7
+
8
+ /* These definitions should only be used by consumers of the Device Array API.
9
+ * Consumers access the API through the opaque pointer stored in
10
+ * _devicearray._DEVICEARRAY_API. We don't want these definitions in
11
+ * _devicearray.cpp itself because they would conflict with the actual
12
+ * implementations there.
13
+ */
14
+ #ifndef NUMBA_IN_DEVICEARRAY_CPP_
15
+
16
+ extern void **DeviceArray_API;
17
+ #define DeviceArrayType (*(PyTypeObject*)DeviceArray_API[0])
18
+
19
+ #endif /* ndef NUMBA_IN_DEVICEARRAY_CPP */
20
+
21
+ #ifdef __cplusplus
22
+ }
23
+ #endif
24
+
25
+ #endif /* NUMBA_DEVICEARRAY_H_ */
lib/python3.10/site-packages/numba/_dynfunc.c ADDED
@@ -0,0 +1,534 @@
1
+ /*
2
+ * Definition of Environment and Closure objects.
3
+ * This module is included by _dynfuncmod.c and by pycc-compiled modules.
4
+ */
5
+
6
+ #include "_pymodule.h"
7
+
8
+ #include <string.h>
9
+
10
+
11
+ // if python version is 3.13
12
+ #if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 13)
13
+ #include "pythoncapi_compat.h"
14
+ #define _Py_IsFinalizing Py_IsFinalizing
15
+ #endif
16
+ /* NOTE: EnvironmentObject and ClosureObject must be kept in sync with
17
+ * the definitions in numba/targets/base.py (EnvBody and ClosureBody).
18
+ */
19
+
20
+ /*
21
+ * EnvironmentObject hosts data needed for execution of compiled functions.
22
+ */
23
+ typedef struct {
24
+ PyObject_HEAD
25
+ PyObject *globals;
26
+ /* Assorted "constants" that are needed at runtime to execute
27
+ the compiled function. This can include frozen closure variables,
28
+ lifted loops, etc. */
29
+ PyObject *consts;
30
+ } EnvironmentObject;
31
+
32
+
33
+ static PyMemberDef env_members[] = {
34
+ {"globals", T_OBJECT, offsetof(EnvironmentObject, globals), READONLY, NULL},
35
+ {"consts", T_OBJECT, offsetof(EnvironmentObject, consts), READONLY, NULL},
36
+ {NULL} /* Sentinel */
37
+ };
38
+
39
+ static int
40
+ env_traverse(EnvironmentObject *env, visitproc visit, void *arg)
41
+ {
42
+ Py_VISIT(env->globals);
43
+ Py_VISIT(env->consts);
44
+ return 0;
45
+ }
46
+
47
+ static int
48
+ env_clear(EnvironmentObject *env)
49
+ {
50
+ Py_CLEAR(env->globals);
51
+ Py_CLEAR(env->consts);
52
+ return 0;
53
+ }
54
+
55
+ static void
56
+ env_dealloc(EnvironmentObject *env)
57
+ {
58
+ PyObject_GC_UnTrack((PyObject *) env);
59
+ env_clear(env);
60
+ Py_TYPE(env)->tp_free((PyObject *) env);
61
+ }
62
+
63
+ static EnvironmentObject *
64
+ env_new_empty(PyTypeObject* type)
65
+ {
66
+ return (EnvironmentObject *) PyType_GenericNew(type, NULL, NULL);
67
+ }
68
+
69
+ static PyObject *
70
+ env_new(PyTypeObject* type, PyObject* args, PyObject* kwds)
71
+ {
72
+ PyObject *globals;
73
+ EnvironmentObject *env;
74
+ static char *kwlist[] = {"globals", 0};
75
+
76
+ if (!PyArg_ParseTupleAndKeywords(
77
+ args, kwds, "O!:function", kwlist,
78
+ &PyDict_Type, &globals))
79
+ return NULL;
80
+
81
+ env = env_new_empty(type);
82
+ if (env == NULL)
83
+ return NULL;
84
+ Py_INCREF(globals);
85
+ env->globals = globals;
86
+ env->consts = PyList_New(0);
87
+ if (!env->consts) {
88
+ Py_DECREF(env);
89
+ return NULL;
90
+ }
91
+ return (PyObject *) env;
92
+ }
93
+
94
+
95
+ static PyTypeObject EnvironmentType = {
96
+ PyVarObject_HEAD_INIT(NULL, 0)
97
+ "_dynfunc.Environment", /* tp_name */
98
+ sizeof(EnvironmentObject), /* tp_basicsize */
99
+ 0, /* tp_itemsize */
100
+ (destructor) env_dealloc, /* tp_dealloc */
101
+ 0, /* tp_vectorcall_offset */
102
+ 0, /* tp_getattr*/
103
+ 0, /* tp_setattr */
104
+ 0, /* tp_as_async */
105
+ 0, /* tp_repr */
106
+ 0, /* tp_as_number */
107
+ 0, /* tp_as_sequence */
108
+ 0, /* tp_as_mapping */
109
+ 0, /* tp_hash */
110
+ 0, /* tp_call */
111
+ 0, /* tp_str */
112
+ 0, /* tp_getattro */
113
+ 0, /* tp_setattro */
114
+ 0, /* tp_as_buffer */
115
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
116
+ 0, /* tp_doc */
117
+ (traverseproc) env_traverse, /* tp_traverse */
118
+ (inquiry) env_clear, /* tp_clear */
119
+ 0, /* tp_richcompare */
120
+ 0, /* tp_weaklistoffset */
121
+ 0, /* tp_iter */
122
+ 0, /* tp_iternext */
123
+ 0, /* tp_methods */
124
+ env_members, /* tp_members */
125
+ 0, /* tp_getset */
126
+ 0, /* tp_base */
127
+ 0, /* tp_dict */
128
+ 0, /* tp_descr_get */
129
+ 0, /* tp_descr_set */
130
+ 0, /* tp_dictoffset */
131
+ 0, /* tp_init */
132
+ 0, /* tp_alloc */
133
+ env_new, /* tp_new */
134
+ 0, /* tp_free */
135
+ 0, /* tp_is_gc */
136
+ 0, /* tp_bases */
137
+ 0, /* tp_mro */
138
+ 0, /* tp_cache */
139
+ 0, /* tp_subclasses */
140
+ 0, /* tp_weaklist */
141
+ 0, /* tp_del */
142
+ 0, /* tp_version_tag */
143
+ 0, /* tp_finalize */
144
+ 0, /* tp_vectorcall */
145
+ #if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12)
146
+ /* This was introduced first in 3.12
147
+ * https://github.com/python/cpython/issues/91051
148
+ */
149
+ 0, /* tp_watched */
150
+ #endif
151
+
152
+ /* WARNING: Do not remove this, only modify it! It is a version guard to
153
+ * act as a reminder to update this struct on Python version update! */
154
+ #if (PY_MAJOR_VERSION == 3)
155
+ #if ! (NB_SUPPORTED_PYTHON_MINOR)
156
+ #error "Python minor version is not supported."
157
+ #endif
158
+ #else
159
+ #error "Python major version is not supported."
160
+ #endif
161
+ /* END WARNING*/
162
+ };
163
+
164
+ /* A closure object is created for each call to make_function(), and stored
165
+ as the resulting PyCFunction object's "self" pointer. It points to an
166
+ EnvironmentObject which is constructed during compilation. This allows
167
+ for two things:
168
+ - lifetime management of dependent data (e.g. lifted loop dispatchers)
169
+ - access to the execution environment by the compiled function
170
+ (for example the globals module)
171
+ */
172
+
173
+ /* Closure is a variable-sized object for binary compatibility with
174
+ Generator (see below). */
175
+ #define CLOSURE_HEAD \
176
+ PyObject_VAR_HEAD \
177
+ EnvironmentObject *env;
178
+
179
+ typedef struct {
180
+ CLOSURE_HEAD
181
+ /* The dynamically-filled method definition for the PyCFunction object
182
+ using this closure. */
183
+ PyMethodDef def;
184
+ /* Arbitrary object to keep alive during the closure's lifetime.
185
+ (put a tuple to put several objects alive).
186
+ In practice, this helps keep the LLVM module and its generated
187
+ code alive. */
188
+ PyObject *keepalive;
189
+ PyObject *weakreflist;
190
+ } ClosureObject;
191
+
192
+
193
+ static int
194
+ closure_traverse(ClosureObject *clo, visitproc visit, void *arg)
195
+ {
196
+ Py_VISIT(clo->env);
197
+ Py_VISIT(clo->keepalive);
198
+ return 0;
199
+ }
200
+
201
+ static void
202
+ closure_dealloc(ClosureObject *clo)
203
+ {
204
+ PyObject_GC_UnTrack((PyObject *) clo);
205
+ if (clo->weakreflist != NULL)
206
+ PyObject_ClearWeakRefs((PyObject *) clo);
207
+ PyObject_Free((void *) clo->def.ml_name);
208
+ PyObject_Free((void *) clo->def.ml_doc);
209
+ Py_XDECREF(clo->env);
210
+ Py_XDECREF(clo->keepalive);
211
+ Py_TYPE(clo)->tp_free((PyObject *) clo);
212
+ }
213
+
214
+ static PyTypeObject ClosureType = {
215
+ PyVarObject_HEAD_INIT(NULL, 0)
216
+ "_dynfunc._Closure", /* tp_name */
217
+ sizeof(ClosureObject), /* tp_basicsize */
218
+ 0, /* tp_itemsize */
219
+ (destructor) closure_dealloc, /* tp_dealloc */
220
+ 0, /* tp_vectorcall_offset */
221
+ 0, /* tp_getattr */
222
+ 0, /* tp_setattr */
223
+ 0, /* tp_as_async */
224
+ 0, /* tp_repr */
225
+ 0, /* tp_as_number */
226
+ 0, /* tp_as_sequence */
227
+ 0, /* tp_as_mapping */
228
+ 0, /* tp_hash */
229
+ 0, /* tp_call */
230
+ 0, /* tp_str */
231
+ 0, /* tp_getattro */
232
+ 0, /* tp_setattro */
233
+ 0, /* tp_as_buffer */
234
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
235
+ 0, /* tp_doc */
236
+ (traverseproc) closure_traverse, /* tp_traverse */
237
+ 0, /* tp_clear */
238
+ 0, /* tp_richcompare */
239
+ offsetof(ClosureObject, weakreflist), /* tp_weaklistoffset */
240
+ 0, /* tp_iter */
241
+ 0, /* tp_iternext */
242
+ 0, /* tp_methods */
243
+ 0, /* tp_members */
244
+ 0, /* tp_getset */
245
+ 0, /* tp_base */
246
+ 0, /* tp_dict */
247
+ 0, /* tp_descr_get */
248
+ 0, /* tp_descr_set */
249
+ 0, /* tp_dictoffset */
250
+ 0, /* tp_init */
251
+ 0, /* tp_alloc */
252
+ 0, /* tp_new */
253
+ 0, /* tp_free */
254
+ 0, /* tp_is_gc */
255
+ 0, /* tp_bases */
256
+ 0, /* tp_mro */
257
+ 0, /* tp_cache */
258
+ 0, /* tp_subclasses */
259
+ 0, /* tp_weaklist */
260
+ 0, /* tp_del */
261
+ 0, /* tp_version_tag */
262
+ 0, /* tp_finalize */
263
+ 0, /* tp_vectorcall */
264
+ #if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12)
265
+ /* This was introduced first in 3.12
266
+ * https://github.com/python/cpython/issues/91051
267
+ */
268
+ 0, /* tp_watched */
269
+ #endif
270
+
271
+ /* WARNING: Do not remove this, only modify it! It is a version guard to
272
+ * act as a reminder to update this struct on Python version update! */
273
+ #if (PY_MAJOR_VERSION == 3)
274
+ #if ! (NB_SUPPORTED_PYTHON_MINOR)
275
+ #error "Python minor version is not supported."
276
+ #endif
277
+ #else
278
+ #error "Python major version is not supported."
279
+ #endif
280
+ /* END WARNING*/
281
+ };
282
+
283
+
284
+ /* Return an owned piece of character data duplicating a Python string
285
+ object's value. */
286
+ static char *
287
+ dup_string(PyObject *strobj)
288
+ {
289
+ const char *tmp = NULL;
290
+ char *str;
291
+ tmp = PyString_AsString(strobj);
292
+ if (tmp == NULL)
293
+ return NULL;
294
+ /* Using PyObject_Malloc allows this memory to be tracked for
295
+ leaks. */
296
+ str = PyObject_Malloc(strlen(tmp) + 1);
297
+ if (str == NULL) {
298
+ PyErr_NoMemory();
299
+ return NULL;
300
+ }
301
+ strcpy(str, tmp);
302
+ return str;
303
+ }
304
+
305
+ /* Create and initialize a new Closure object */
306
+ static ClosureObject *
307
+ closure_new(PyObject *name, PyObject *doc, PyCFunction fnaddr,
308
+ EnvironmentObject *env, PyObject *keepalive)
309
+ {
310
+ ClosureObject *clo = (ClosureObject *) PyType_GenericAlloc(&ClosureType, 0);
311
+ if (clo == NULL)
312
+ return NULL;
313
+
314
+ clo->def.ml_name = dup_string(name);
315
+ if (!clo->def.ml_name) {
316
+ Py_DECREF(clo);
317
+ return NULL;
318
+ }
319
+ clo->def.ml_meth = fnaddr;
320
+ clo->def.ml_flags = METH_VARARGS | METH_KEYWORDS;
321
+ clo->def.ml_doc = dup_string(doc);
322
+ if (!clo->def.ml_doc) {
323
+ Py_DECREF(clo);
324
+ return NULL;
325
+ }
326
+ Py_INCREF(env);
327
+ clo->env = env;
328
+ Py_XINCREF(keepalive);
329
+ clo->keepalive = keepalive;
330
+ return clo;
331
+ }
332
+
333
+ /* Create a new PyCFunction object wrapping a closure defined by
334
+ the given arguments. */
335
+ static PyObject *
336
+ pycfunction_new(PyObject *module, PyObject *name, PyObject *doc,
337
+ PyCFunction fnaddr, EnvironmentObject *env, PyObject *keepalive)
338
+ {
339
+ PyObject *funcobj;
340
+ PyObject *modname = NULL;
341
+ ClosureObject *closure = NULL;
342
+
343
+ closure = closure_new(name, doc, fnaddr, env, keepalive);
344
+ if (closure == NULL) goto FAIL;
345
+
346
+ modname = PyObject_GetAttrString(module, "__name__");
347
+ if (modname == NULL) goto FAIL;
348
+
349
+ funcobj = PyCFunction_NewEx(&closure->def, (PyObject *) closure, modname);
350
+ Py_DECREF(closure);
351
+ Py_DECREF(modname);
352
+
353
+ return funcobj;
354
+
355
+ FAIL:
356
+ Py_XDECREF(closure);
357
+ Py_XDECREF(modname);
358
+ return NULL;
359
+ }
360
+
361
+ /*
362
+ * Python-facing wrapper for Numba-compiled generator.
363
+ * Note the Environment's offset inside the struct is the same as in the
364
+ * Closure object. This is required to simplify generation of Python wrappers.
365
+ */
366
+
367
+ typedef void (*gen_finalizer_t)(void *);
368
+
369
+ typedef struct {
370
+ CLOSURE_HEAD
371
+ PyCFunctionWithKeywords nextfunc;
372
+ gen_finalizer_t finalizer;
373
+ PyObject *weakreflist;
374
+ union {
375
+ double dummy; /* Force alignment */
376
+ char state[0];
377
+ };
378
+ } GeneratorObject;
379
+
380
+ static int
381
+ generator_traverse(GeneratorObject *gen, visitproc visit, void *arg)
382
+ {
383
+ /* XXX this doesn't traverse the state, which can own references to
384
+ PyObjects */
385
+ Py_VISIT(gen->env);
386
+ return 0;
387
+ }
388
+
389
+ static int
390
+ generator_clear(GeneratorObject *gen)
391
+ {
392
+ if (gen->finalizer != NULL) {
393
+ gen->finalizer(gen->state);
394
+ gen->finalizer = NULL;
395
+ }
396
+ Py_CLEAR(gen->env);
397
+ gen->nextfunc = NULL;
398
+ return 0;
399
+ }
400
+
401
+ static void
402
+ generator_dealloc(GeneratorObject *gen)
403
+ {
404
+ PyObject_GC_UnTrack((PyObject *) gen);
405
+ if (gen->weakreflist != NULL)
406
+ PyObject_ClearWeakRefs((PyObject *) gen);
407
+ /* XXX The finalizer may be called after the LLVM module has been
408
+ destroyed (typically at interpreter shutdown) */
409
+ if (!_Py_IsFinalizing())
410
+ if (gen->finalizer != NULL)
411
+ gen->finalizer(gen->state);
412
+ Py_XDECREF(gen->env);
413
+ Py_TYPE(gen)->tp_free((PyObject *) gen);
414
+ }
415
+
416
+ static PyObject *
417
+ generator_iternext(GeneratorObject *gen)
418
+ {
419
+ PyObject *res, *args;
420
+ if (gen->nextfunc == NULL) {
421
+ PyErr_SetString(PyExc_RuntimeError,
422
+ "cannot call next() on finalized generator");
423
+ return NULL;
424
+ }
425
+ args = PyTuple_Pack(1, (PyObject *) gen);
426
+ if (args == NULL)
427
+ return NULL;
428
+ res = (*gen->nextfunc)((PyObject *) gen, args, NULL);
429
+ Py_DECREF(args);
430
+ return res;
431
+ }
432
+
433
+ static PyTypeObject GeneratorType = {
434
+ PyVarObject_HEAD_INIT(NULL, 0)
435
+ "_dynfunc._Generator", /* tp_name*/
436
+ offsetof(GeneratorObject, state), /* tp_basicsize*/
437
+ 1, /* tp_itemsize*/
438
+ (destructor) generator_dealloc, /* tp_dealloc*/
439
+ 0, /* tp_vectorcall_offset*/
440
+ 0, /* tp_getattr*/
441
+ 0, /* tp_setattr*/
442
+ 0, /* tp_as_async*/
443
+ 0, /* tp_repr*/
444
+ 0, /* tp_as_number*/
445
+ 0, /* tp_as_sequence*/
446
+ 0, /* tp_as_mapping*/
447
+ 0, /* tp_hash */
448
+ 0, /* tp_call*/
449
+ 0, /* tp_str*/
450
+ 0, /* tp_getattro*/
451
+ 0, /* tp_setattro*/
452
+ 0, /* tp_as_buffer*/
453
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC
454
+ | Py_TPFLAGS_BASETYPE, /* tp_flags*/
455
+ 0, /* tp_doc */
456
+ (traverseproc) generator_traverse, /* tp_traverse */
457
+ (inquiry) generator_clear, /* tp_clear */
458
+ 0, /* tp_richcompare */
459
+ offsetof(GeneratorObject, weakreflist), /* tp_weaklistoffset */
460
+ PyObject_SelfIter, /* tp_iter */
461
+ (iternextfunc) generator_iternext, /* tp_iternext */
462
+ 0, /* tp_methods */
463
+ 0, /* tp_members */
464
+ 0, /* tp_getset */
465
+ 0, /* tp_base */
466
+ 0, /* tp_dict */
467
+ 0, /* tp_descr_get */
468
+ 0, /* tp_descr_set */
469
+ 0, /* tp_dictoffset */
470
+ 0, /* tp_init */
471
+ 0, /* tp_alloc */
472
+ 0, /* tp_new */
473
+ 0, /* tp_free */
474
+ 0, /* tp_is_gc */
475
+ 0, /* tp_bases */
476
+ 0, /* tp_mro */
477
+ 0, /* tp_cache */
478
+ 0, /* tp_subclasses */
479
+ 0, /* tp_weaklist */
480
+ 0, /* tp_del */
481
+ 0, /* tp_version_tag */
482
+ 0, /* tp_finalize */
483
+ 0, /* tp_vectorcall */
484
+ #if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12)
485
+ /* This was introduced first in 3.12
486
+ * https://github.com/python/cpython/issues/91051
487
+ */
488
+ 0, /* tp_watched */
489
+ #endif
490
+
491
+ /* WARNING: Do not remove this, only modify it! It is a version guard to
492
+ * act as a reminder to update this struct on Python version update! */
493
+ #if (PY_MAJOR_VERSION == 3)
494
+ #if ! (NB_SUPPORTED_PYTHON_MINOR)
495
+ #error "Python minor version is not supported."
496
+ #endif
497
+ #else
498
+ #error "Python major version is not supported."
499
+ #endif
500
+ /* END WARNING*/
501
+ };
502
+
503
+ /* Dynamically create a new generator object */
504
+ static PyObject *
505
+ Numba_make_generator(Py_ssize_t gen_state_size,
506
+ void *initial_state,
507
+ PyCFunctionWithKeywords nextfunc,
508
+ gen_finalizer_t finalizer,
509
+ EnvironmentObject *env)
510
+ {
511
+ GeneratorObject *gen;
512
+ gen = (GeneratorObject *) PyType_GenericAlloc(&GeneratorType, gen_state_size);
513
+ if (gen == NULL)
514
+ return NULL;
515
+ memcpy(gen->state, initial_state, gen_state_size);
516
+ gen->nextfunc = nextfunc;
517
+ Py_XINCREF(env);
518
+ gen->env = env;
519
+ gen->finalizer = finalizer;
520
+ return (PyObject *) gen;
521
+ }
522
+
523
+ /* Initialization subroutine for use by modules including this */
524
+ static int
525
+ init_dynfunc_module(PyObject *module)
526
+ {
527
+ if (PyType_Ready(&ClosureType))
528
+ return -1;
529
+ if (PyType_Ready(&EnvironmentType))
530
+ return -1;
531
+ if (PyType_Ready(&GeneratorType))
532
+ return -1;
533
+ return 0;
534
+ }
lib/python3.10/site-packages/numba/_dynfunc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (49.3 kB).
lib/python3.10/site-packages/numba/_dynfuncmod.c ADDED
@@ -0,0 +1,93 @@
1
+ #include "_dynfunc.c"
2
+
3
+ /* Python-facing function to dynamically create a new C function object */
4
+ static PyObject*
5
+ make_function(PyObject *self, PyObject *args)
6
+ {
7
+ PyObject *module, *fname, *fdoc, *fnaddrobj;
8
+ void *fnaddr;
9
+ EnvironmentObject *env;
10
+ PyObject *keepalive;
11
+
12
+ if (!PyArg_ParseTuple(args, "OOOOO!|O",
13
+ &module, &fname, &fdoc, &fnaddrobj, &EnvironmentType, &env,
14
+ &keepalive)) {
15
+ return NULL;
16
+ }
17
+
18
+ fnaddr = PyLong_AsVoidPtr(fnaddrobj);
19
+ if (fnaddr == NULL && PyErr_Occurred())
20
+ return NULL;
21
+
22
+ return pycfunction_new(module, fname, fdoc, fnaddr, env, keepalive);
23
+ }
24
+
25
+ static PyMethodDef ext_methods[] = {
26
+ #define declmethod(func) { #func , ( PyCFunction )func , METH_VARARGS , NULL }
27
+ declmethod(make_function),
28
+ { NULL },
29
+ #undef declmethod
30
+ };
31
+
32
+
33
+ static PyObject *
34
+ build_c_helpers_dict(void)
35
+ {
36
+ PyObject *dct = PyDict_New();
37
+ if (dct == NULL)
38
+ goto error;
39
+
40
+ #define _declpointer(name, value) do { \
41
+ PyObject *o = PyLong_FromVoidPtr(value); \
42
+ if (o == NULL) goto error; \
43
+ if (PyDict_SetItemString(dct, name, o)) { \
44
+ Py_DECREF(o); \
45
+ goto error; \
46
+ } \
47
+ Py_DECREF(o); \
48
+ } while (0)
49
+
50
+ #define declmethod(func) _declpointer(#func, &Numba_##func)
51
+
52
+ #define declpointer(ptr) _declpointer(#ptr, &ptr)
53
+
54
+ declmethod(make_generator);
55
+
56
+ #undef declmethod
57
+ return dct;
58
+ error:
59
+ Py_XDECREF(dct);
60
+ return NULL;
61
+ }
62
+
63
+ MOD_INIT(_dynfunc) {
64
+ PyObject *m, *impl_info;
65
+
66
+ MOD_DEF(m, "_dynfunc", "No docs", ext_methods)
67
+ if (m == NULL)
68
+ return MOD_ERROR_VAL;
69
+
70
+ if (init_dynfunc_module(m))
71
+ return MOD_ERROR_VAL;
72
+
73
+ impl_info = Py_BuildValue(
74
+ "{snsnsn}",
75
+ "offsetof_closure_body", offsetof(ClosureObject, env),
76
+ "offsetof_env_body", offsetof(EnvironmentObject, globals),
77
+ "offsetof_generator_state", offsetof(GeneratorObject, state)
78
+ );
79
+ if (impl_info == NULL)
80
+ return MOD_ERROR_VAL;
81
+ PyModule_AddObject(m, "_impl_info", impl_info);
82
+
83
+ Py_INCREF(&ClosureType);
84
+ PyModule_AddObject(m, "_Closure", (PyObject *) (&ClosureType));
85
+ Py_INCREF(&EnvironmentType);
86
+ PyModule_AddObject(m, "Environment", (PyObject *) (&EnvironmentType));
87
+ Py_INCREF(&GeneratorType);
88
+ PyModule_AddObject(m, "_Generator", (PyObject *) (&GeneratorType));
89
+
90
+ PyModule_AddObject(m, "c_helpers", build_c_helpers_dict());
91
+
92
+ return MOD_SUCCESS_VAL(m);
93
+ }
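A small inspection sketch for the metadata this module publishes at init time (the _impl_info and c_helpers objects set above); it assumes a working numba installation, and the printed offsets vary by platform and Python version:

    from numba import _dynfunc

    # Structure offsets published for the code generator.
    print(_dynfunc._impl_info)

    # Names of C helpers exposed through the c_helpers dict (e.g. make_generator).
    print(sorted(_dynfunc.c_helpers))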
lib/python3.10/site-packages/numba/_hashtable.h ADDED
@@ -0,0 +1,132 @@
1
+ /*
2
+ * See _hashtable.c for more information about this file.
3
+ */
4
+
5
+ #ifndef Py_HASHTABLE_H
6
+ #define Py_HASHTABLE_H
7
+
8
+ /* The whole API is private */
9
+ #ifndef Py_LIMITED_API
10
+
11
+ typedef struct _Py_slist_item_s {
12
+ struct _Py_slist_item_s *next;
13
+ } _Py_slist_item_t;
14
+
15
+ typedef struct {
16
+ _Py_slist_item_t *head;
17
+ } _Py_slist_t;
18
+
19
+ #define _Py_SLIST_ITEM_NEXT(ITEM) (((_Py_slist_item_t *)ITEM)->next)
20
+
21
+ #define _Py_SLIST_HEAD(SLIST) (((_Py_slist_t *)SLIST)->head)
22
+
23
+ typedef struct {
24
+ /* used by _Numba_hashtable_t.buckets to link entries */
25
+ _Py_slist_item_t _Py_slist_item;
26
+
27
+ const void *key;
28
+ Py_uhash_t key_hash;
29
+
30
+ /* data follows */
31
+ } _Numba_hashtable_entry_t;
32
+
33
+ #define _Numba_HASHTABLE_ENTRY_DATA(ENTRY) \
34
+ ((char *)(ENTRY) + sizeof(_Numba_hashtable_entry_t))
35
+
36
+ #define _Numba_HASHTABLE_ENTRY_DATA_AS_VOID_P(ENTRY) \
37
+ (*(void **)_Numba_HASHTABLE_ENTRY_DATA(ENTRY))
38
+
39
+ #define _Numba_HASHTABLE_ENTRY_READ_DATA(TABLE, DATA, DATA_SIZE, ENTRY) \
40
+ do { \
41
+ assert((DATA_SIZE) == (TABLE)->data_size); \
42
+ memcpy(DATA, _Numba_HASHTABLE_ENTRY_DATA(ENTRY), DATA_SIZE); \
43
+ } while (0)
44
+
45
+ typedef Py_uhash_t (*_Numba_hashtable_hash_func) (const void *key);
46
+ typedef int (*_Numba_hashtable_compare_func) (const void *key, const _Numba_hashtable_entry_t *he);
47
+ typedef void* (*_Numba_hashtable_copy_data_func)(void *data);
48
+ typedef void (*_Numba_hashtable_free_data_func)(void *data);
49
+ typedef size_t (*_Numba_hashtable_get_data_size_func)(void *data);
50
+
51
+ typedef struct {
52
+ /* allocate a memory block */
53
+ void* (*malloc) (size_t size);
54
+
55
+ /* release a memory block */
56
+ void (*free) (void *ptr);
57
+ } _Numba_hashtable_allocator_t;
58
+
59
+ typedef struct {
60
+ size_t num_buckets;
61
+ size_t entries; /* Total number of entries in the table. */
62
+ _Py_slist_t *buckets;
63
+ size_t data_size;
64
+
65
+ _Numba_hashtable_hash_func hash_func;
66
+ _Numba_hashtable_compare_func compare_func;
67
+ _Numba_hashtable_copy_data_func copy_data_func;
68
+ _Numba_hashtable_free_data_func free_data_func;
69
+ _Numba_hashtable_get_data_size_func get_data_size_func;
70
+ _Numba_hashtable_allocator_t alloc;
71
+ } _Numba_hashtable_t;
72
+
73
+ /* hash and compare functions for integers and pointers */
74
+ extern "C" PyAPI_FUNC(Py_uhash_t) _Numba_hashtable_hash_ptr(const void *key);
75
+ extern "C" PyAPI_FUNC(Py_uhash_t) _Numba_hashtable_hash_int(const void *key);
76
+ extern "C" PyAPI_FUNC(int) _Numba_hashtable_compare_direct(const void *key, const _Numba_hashtable_entry_t *entry);
77
+
78
+ extern "C" PyAPI_FUNC(_Numba_hashtable_t *) _Numba_hashtable_new(
79
+ size_t data_size,
80
+ _Numba_hashtable_hash_func hash_func,
81
+ _Numba_hashtable_compare_func compare_func);
82
+ extern "C" PyAPI_FUNC(_Numba_hashtable_t *) _Numba_hashtable_new_full(
83
+ size_t data_size,
84
+ size_t init_size,
85
+ _Numba_hashtable_hash_func hash_func,
86
+ _Numba_hashtable_compare_func compare_func,
87
+ _Numba_hashtable_copy_data_func copy_data_func,
88
+ _Numba_hashtable_free_data_func free_data_func,
89
+ _Numba_hashtable_get_data_size_func get_data_size_func,
90
+ _Numba_hashtable_allocator_t *allocator);
91
+ extern "C" PyAPI_FUNC(_Numba_hashtable_t *) _Numba_hashtable_copy(_Numba_hashtable_t *src);
92
+ extern "C" PyAPI_FUNC(void) _Numba_hashtable_clear(_Numba_hashtable_t *ht);
93
+ extern "C" PyAPI_FUNC(void) _Numba_hashtable_destroy(_Numba_hashtable_t *ht);
94
+
95
+ typedef int (*_Numba_hashtable_foreach_func) (_Numba_hashtable_entry_t *entry, void *arg);
96
+
97
+ extern "C" PyAPI_FUNC(int) _Numba_hashtable_foreach(
98
+ _Numba_hashtable_t *ht,
99
+ _Numba_hashtable_foreach_func func, void *arg);
100
+ extern "C" PyAPI_FUNC(size_t) _Numba_hashtable_size(_Numba_hashtable_t *ht);
101
+
102
+ extern "C" PyAPI_FUNC(_Numba_hashtable_entry_t*) _Numba_hashtable_get_entry(
103
+ _Numba_hashtable_t *ht,
104
+ const void *key);
105
+ extern "C" PyAPI_FUNC(int) _Numba_hashtable_set(
106
+ _Numba_hashtable_t *ht,
107
+ const void *key,
108
+ void *data,
109
+ size_t data_size);
110
+ extern "C" PyAPI_FUNC(int) _Numba_hashtable_get(
111
+ _Numba_hashtable_t *ht,
112
+ const void *key,
113
+ void *data,
114
+ size_t data_size);
115
+ extern "C" PyAPI_FUNC(int) _Numba_hashtable_pop(
116
+ _Numba_hashtable_t *ht,
117
+ const void *key,
118
+ void *data,
119
+ size_t data_size);
120
+ extern "C" PyAPI_FUNC(void) _Numba_hashtable_delete(
121
+ _Numba_hashtable_t *ht,
122
+ const void *key);
123
+
124
+ #define _Numba_HASHTABLE_SET(TABLE, KEY, DATA) \
125
+ _Numba_hashtable_set(TABLE, KEY, &(DATA), sizeof(DATA))
126
+
127
+ #define _Numba_HASHTABLE_GET(TABLE, KEY, DATA) \
128
+ _Numba_hashtable_get(TABLE, KEY, &(DATA), sizeof(DATA))
129
+
130
+ #endif /* Py_LIMITED_API */
131
+
132
+ #endif
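
A minimal usage sketch of this private hash-table API, based only on the declarations above; it assumes the conventional 0-on-success return code for _Numba_hashtable_set and is not part of the diff.

#include <Python.h>
#include "_hashtable.h"

/* Map a pointer key to a void* payload (data_size == sizeof(void *)). */
static int hashtable_example(void)
{
    void *key = (void *) 0x1000;      /* arbitrary pointer used as key */
    void *payload = (void *) 0x2000;  /* value to store */
    void *fetched = NULL;
    _Numba_hashtable_t *ht;

    ht = _Numba_hashtable_new(sizeof(void *),
                              _Numba_hashtable_hash_ptr,
                              _Numba_hashtable_compare_direct);
    if (ht == NULL)
        return -1;

    /* assumed: returns 0 on success */
    if (_Numba_HASHTABLE_SET(ht, key, payload) != 0) {
        _Numba_hashtable_destroy(ht);
        return -1;
    }
    _Numba_HASHTABLE_GET(ht, key, fetched);  /* copies the stored pointer out */

    _Numba_hashtable_destroy(ht);
    return 0;
}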
lib/python3.10/site-packages/numba/_helperlib.c ADDED
@@ -0,0 +1,1251 @@
1
+ /*
2
+ * Helper functions used by Numba at runtime.
3
+ * This C file is meant to be included after defining the
4
+ * NUMBA_EXPORT_FUNC() and NUMBA_EXPORT_DATA() macros.
5
+ */
6
+
7
+ #include "_pymodule.h"
8
+ #include <stddef.h>
9
+ #include <stdio.h>
10
+ #include <math.h>
11
+ #include <complex.h>
12
+ #ifdef _MSC_VER
13
+ #define int64_t signed __int64
14
+ #define uint64_t unsigned __int64
15
+ #define uint32_t unsigned __int32
16
+ #define _complex_float_t _Fcomplex
17
+ #define _complex_float_ctor(r, i) _FCbuild(r, i)
18
+ #define _complex_double_t _Dcomplex
19
+ #else
20
+ #include <stdint.h>
21
+ #define _complex_float_t complex float
22
+ #if defined(_Imaginary_I)
23
+ #define _complex_float_ctor(r, i) (r + _Imaginary_I * i)
24
+ #elif defined(_Complex_I)
25
+ #define _complex_float_ctor(r, i) (r + _Complex_I * i)
26
+ #else
27
+ #error "Lack _Imaginary_I and _Complex_I"
28
+ #endif
29
+ #define _complex_double_t complex double
30
+ #endif
31
+ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
32
+ #include <numpy/ndarrayobject.h>
33
+ #include <numpy/arrayscalars.h>
34
+
35
+ #include "_arraystruct.h"
36
+
37
+
38
+ #if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 11)
39
+ /*
40
+ * For struct _frame
41
+ */
42
+ #include "internal/pycore_frame.h"
43
+ #endif
44
+
45
+ /*
46
+ * Other helpers.
47
+ */
48
+
49
+
50
+ /* Fix fmod() and fmodf() for windows x64 VC 9.0 (VS 2008)
51
+ https://support.microsoft.com/en-us/kb/982107
52
+ */
53
+ static void (*fnclex)(void) = NULL;
54
+
55
+ NUMBA_EXPORT_FUNC(double)
56
+ numba_fixed_fmod(double x, double y){
57
+ fnclex(); /* no inline asm in x64 =( */
58
+ return fmod(x, y);
59
+ }
60
+
61
+ NUMBA_EXPORT_FUNC(float)
62
+ numba_fixed_fmodf(float x, float y) {
63
+ fnclex(); /* no inline asm in x64 =( */
64
+ return fmodf(x, y);
65
+ }
66
+
67
+ NUMBA_EXPORT_FUNC(void)
68
+ numba_set_fnclex(void *fn){
69
+ fnclex = fn;
70
+ }
71
+
72
+ /* provide 64-bit division function to 32-bit platforms */
73
+ NUMBA_EXPORT_FUNC(int64_t)
74
+ numba_sdiv(int64_t a, int64_t b) {
75
+ return a / b;
76
+ }
77
+
78
+ NUMBA_EXPORT_FUNC(uint64_t)
79
+ numba_udiv(uint64_t a, uint64_t b) {
80
+ return a / b;
81
+ }
82
+
83
+ /* provide 64-bit remainder function to 32-bit platforms */
84
+ NUMBA_EXPORT_FUNC(int64_t)
85
+ numba_srem(int64_t a, int64_t b) {
86
+ return a % b;
87
+ }
88
+
89
+ NUMBA_EXPORT_FUNC(uint64_t)
90
+ numba_urem(uint64_t a, uint64_t b) {
91
+ return a % b;
92
+ }
93
+
94
+ /* provide frexp and ldexp; these wrappers deal with special cases
95
+ * (zero, nan, infinity) directly, to sidestep platform differences.
96
+ */
97
+ NUMBA_EXPORT_FUNC(double)
98
+ numba_frexp(double x, int *exp)
99
+ {
100
+ if (!Py_IS_FINITE(x) || !x)
101
+ *exp = 0;
102
+ else
103
+ x = frexp(x, exp);
104
+ return x;
105
+ }
106
+
107
+ NUMBA_EXPORT_FUNC(float)
108
+ numba_frexpf(float x, int *exp)
109
+ {
110
+ if (Py_IS_NAN(x) || Py_IS_INFINITY(x) || !x)
111
+ *exp = 0;
112
+ else
113
+ x = frexpf(x, exp);
114
+ return x;
115
+ }
116
+
117
+ NUMBA_EXPORT_FUNC(double)
118
+ numba_ldexp(double x, int exp)
119
+ {
120
+ if (Py_IS_FINITE(x) && x && exp)
121
+ x = ldexp(x, exp);
122
+ return x;
123
+ }
124
+
125
+ NUMBA_EXPORT_FUNC(float)
126
+ numba_ldexpf(float x, int exp)
127
+ {
128
+ if (Py_IS_FINITE(x) && x && exp)
129
+ x = ldexpf(x, exp);
130
+ return x;
131
+ }
132
+
133
+ /* provide complex power */
134
+ NUMBA_EXPORT_FUNC(void)
135
+ numba_cpow(Py_complex *a, Py_complex *b, Py_complex *out) {
136
+ errno = 0;
137
+ *out = _Py_c_pow(*a, *b);
138
+ if (errno == EDOM) {
139
+ /* _Py_c_pow() doesn't bother returning the right value
140
+ in this case, as Python raises ZeroDivisionError */
141
+ out->real = out->imag = Py_NAN;
142
+ }
143
+ }
144
+
145
+ NUMBA_EXPORT_FUNC(void)
146
+ numba_cpowf(_complex_float_t *a, _complex_float_t *b, _complex_float_t *out) {
147
+ Py_complex _a, _b, _out;
148
+ _a.real = crealf(*a);
149
+ _a.imag = cimagf(*a);
150
+ _b.real = crealf(*b);
151
+ _b.imag = cimagf(*b);
152
+ numba_cpow(&_a, &_b, &_out);
153
+ *out = _complex_float_ctor((float) _out.real, (float) _out.imag);
154
+ }
155
+
156
+ /* C99 math functions: redirect to system implementations */
157
+
158
+ NUMBA_EXPORT_FUNC(double)
159
+ numba_gamma(double x)
160
+ {
161
+ return tgamma(x);
162
+ }
163
+
164
+ NUMBA_EXPORT_FUNC(float)
165
+ numba_gammaf(float x)
166
+ {
167
+ return tgammaf(x);
168
+ }
169
+
170
+ NUMBA_EXPORT_FUNC(double)
171
+ numba_lgamma(double x)
172
+ {
173
+ return lgamma(x);
174
+ }
175
+
176
+ NUMBA_EXPORT_FUNC(float)
177
+ numba_lgammaf(float x)
178
+ {
179
+ return lgammaf(x);
180
+ }
181
+
182
+ NUMBA_EXPORT_FUNC(double)
183
+ numba_erf(double x)
184
+ {
185
+ return erf(x);
186
+ }
187
+
188
+ NUMBA_EXPORT_FUNC(float)
189
+ numba_erff(float x)
190
+ {
191
+ return erff(x);
192
+ }
193
+
194
+ NUMBA_EXPORT_FUNC(double)
195
+ numba_erfc(double x)
196
+ {
197
+ return erfc(x);
198
+ }
199
+
200
+ NUMBA_EXPORT_FUNC(float)
201
+ numba_erfcf(float x)
202
+ {
203
+ return erfcf(x);
204
+ }
205
+
206
+ NUMBA_EXPORT_FUNC(float)
207
+ numba_nextafterf(float a, float b)
208
+ {
209
+ return nextafterf(a, b);
210
+ }
211
+
212
+ NUMBA_EXPORT_FUNC(double)
213
+ numba_nextafter(double a, double b)
214
+ {
215
+ return nextafter(a, b);
216
+ }
217
+
218
+ /* Unpack any Python complex-like object into a Py_complex structure */
219
+ NUMBA_EXPORT_FUNC(int)
220
+ numba_complex_adaptor(PyObject* obj, Py_complex *out) {
221
+ PyObject* fobj;
222
+ PyArray_Descr *dtype;
223
+ double val[2];
224
+
225
+ // Convert from python complex or numpy complex128
226
+ if (PyComplex_Check(obj)) {
227
+ out->real = PyComplex_RealAsDouble(obj);
228
+ out->imag = PyComplex_ImagAsDouble(obj);
229
+ }
230
+ // Convert from numpy complex64
231
+ else if (PyArray_IsScalar(obj, ComplexFloating)) {
232
+ dtype = PyArray_DescrFromScalar(obj);
233
+ if (dtype == NULL) {
234
+ return 0;
235
+ }
236
+ if (PyArray_CastScalarDirect(obj, dtype, &val[0], NPY_CDOUBLE) < 0) {
237
+ Py_DECREF(dtype);
238
+ return 0;
239
+ }
240
+ out->real = val[0];
241
+ out->imag = val[1];
242
+ Py_DECREF(dtype);
243
+ } else {
244
+ fobj = PyNumber_Float(obj);
245
+ if (!fobj) return 0;
246
+ out->real = PyFloat_AsDouble(fobj);
247
+ out->imag = 0.;
248
+ Py_DECREF(fobj);
249
+ }
250
+ return 1;
251
+ }
252
+
253
+ /* Minimum PyBufferObject structure to hack inside it */
254
+ typedef struct {
255
+ PyObject_HEAD
256
+ PyObject *b_base;
257
+ void *b_ptr;
258
+ Py_ssize_t b_size;
259
+ Py_ssize_t b_offset;
260
+ } PyBufferObject_Hack;
261
+
262
+ /*
263
+ Get data address of record data buffer
264
+ */
265
+ NUMBA_EXPORT_FUNC(void *)
266
+ numba_extract_record_data(PyObject *recordobj, Py_buffer *pbuf) {
267
+ PyObject *attrdata;
268
+ void *ptr;
269
+
270
+ attrdata = PyObject_GetAttrString(recordobj, "data");
271
+ if (!attrdata) return NULL;
272
+
273
+ if (-1 == PyObject_GetBuffer(attrdata, pbuf, 0)){
274
+ Py_DECREF(attrdata);
275
+ return NULL;
276
+ } else {
277
+ ptr = pbuf->buf;
278
+ }
279
+ Py_DECREF(attrdata);
280
+ return ptr;
281
+ }
282
+
283
+ /*
284
+ * Return a record instance with dtype as the record type, and backed
285
+ * by a copy of the memory area pointed to by (pdata, size).
286
+ */
287
+ NUMBA_EXPORT_FUNC(PyObject *)
288
+ numba_recreate_record(void *pdata, int size, PyObject *dtype) {
289
+ PyObject *numpy = NULL;
290
+ PyObject *numpy_record = NULL;
291
+ PyObject *aryobj = NULL;
292
+ PyObject *dtypearg = NULL;
293
+ PyObject *record = NULL;
294
+ PyArray_Descr *descr = NULL;
295
+
296
+ if (dtype == NULL) {
297
+ PyErr_Format(PyExc_RuntimeError,
298
+ "In 'numba_recreate_record', 'dtype' is NULL");
299
+ return NULL;
300
+ }
301
+
302
+ numpy = PyImport_ImportModule("numpy");
303
+ if (!numpy) goto CLEANUP;
304
+
305
+ numpy_record = PyObject_GetAttrString(numpy, "record");
306
+ if (!numpy_record) goto CLEANUP;
307
+
308
+ dtypearg = PyTuple_Pack(2, numpy_record, dtype);
309
+ if (!dtypearg || !PyArray_DescrConverter(dtypearg, &descr))
310
+ goto CLEANUP;
311
+
312
+ /* This steals a reference to descr, so we don't have to DECREF it */
313
+ aryobj = PyArray_FromString(pdata, size, descr, 1, NULL);
314
+ if (!aryobj) goto CLEANUP;
315
+
316
+ record = PySequence_GetItem(aryobj, 0);
317
+
318
+ CLEANUP:
319
+ Py_XDECREF(numpy);
320
+ Py_XDECREF(numpy_record);
321
+ Py_XDECREF(aryobj);
322
+ Py_XDECREF(dtypearg);
323
+
324
+ return record;
325
+ }
326
+
327
+ NUMBA_EXPORT_FUNC(int)
328
+ numba_adapt_ndarray(PyObject *obj, arystruct_t* arystruct) {
329
+ PyArrayObject *ndary;
330
+ int i, ndim;
331
+ npy_intp *p;
332
+
333
+ if (!PyArray_Check(obj)) {
334
+ return -1;
335
+ }
336
+
337
+ ndary = (PyArrayObject*)obj;
338
+ ndim = PyArray_NDIM(ndary);
339
+
340
+ arystruct->data = PyArray_DATA(ndary);
341
+ arystruct->nitems = PyArray_SIZE(ndary);
342
+ arystruct->itemsize = PyArray_ITEMSIZE(ndary);
343
+ arystruct->parent = obj;
344
+ p = arystruct->shape_and_strides;
345
+ for (i = 0; i < ndim; i++, p++) {
346
+ *p = PyArray_DIM(ndary, i);
347
+ }
348
+ for (i = 0; i < ndim; i++, p++) {
349
+ *p = PyArray_STRIDE(ndary, i);
350
+ }
351
+ arystruct->meminfo = NULL;
352
+ return 0;
353
+ }
354
+
355
+ NUMBA_EXPORT_FUNC(int)
356
+ numba_get_buffer(PyObject *obj, Py_buffer *buf)
357
+ {
358
+ /* Ask for shape and strides, but no suboffsets */
359
+ return PyObject_GetBuffer(obj, buf, PyBUF_RECORDS_RO);
360
+ }
361
+
362
+ NUMBA_EXPORT_FUNC(void)
363
+ numba_adapt_buffer(Py_buffer *buf, arystruct_t *arystruct)
364
+ {
365
+ int i;
366
+ npy_intp *p;
367
+
368
+ arystruct->data = buf->buf;
369
+ arystruct->itemsize = buf->itemsize;
370
+ arystruct->parent = buf->obj;
371
+ arystruct->nitems = 1;
372
+ p = arystruct->shape_and_strides;
373
+ for (i = 0; i < buf->ndim; i++, p++) {
374
+ *p = buf->shape[i];
375
+ arystruct->nitems *= buf->shape[i];
376
+ }
377
+ for (i = 0; i < buf->ndim; i++, p++) {
378
+ *p = buf->strides[i];
379
+ }
380
+ arystruct->meminfo = NULL;
381
+ }
382
+
383
+ NUMBA_EXPORT_FUNC(void)
384
+ numba_release_buffer(Py_buffer *buf)
385
+ {
386
+ PyBuffer_Release(buf);
387
+ }
388
+
389
+ NUMBA_EXPORT_FUNC(PyObject *)
390
+ numba_ndarray_new(int nd,
391
+ npy_intp *dims, /* shape */
392
+ npy_intp *strides,
393
+ void* data,
394
+ int type_num,
395
+ int itemsize)
396
+ {
397
+ PyObject *ndary;
398
+ int flags = NPY_ARRAY_BEHAVED;
399
+ ndary = PyArray_New((PyTypeObject*)&PyArray_Type, nd, dims, type_num,
400
+ strides, data, 0, flags, NULL);
401
+ return ndary;
402
+ }
403
+
404
+
405
+ /*
406
+ * Handle reshaping of zero-sized array.
407
+ * See numba_attempt_nocopy_reshape() below.
408
+ */
409
+ static int
410
+ nocopy_empty_reshape(npy_intp nd, const npy_intp *dims, const npy_intp *strides,
411
+ npy_intp newnd, const npy_intp *newdims,
412
+ npy_intp *newstrides, npy_intp itemsize,
413
+ int is_f_order)
414
+ {
415
+ int i;
416
+ /* Just make the strides vaguely reasonable
417
+ * (they can have any value in theory).
418
+ */
419
+ for (i = 0; i < newnd; i++)
420
+ newstrides[i] = itemsize;
421
+ return 1; /* reshape successful */
422
+ }
423
+
424
+ /*
425
+ * Straight from Numpy's _attempt_nocopy_reshape()
426
+ * (np/core/src/multiarray/shape.c).
427
+ * Attempt to reshape an array without copying data
428
+ *
429
+ * This function should correctly handle all reshapes, including
430
+ * axes of length 1. Zero strides should work but are untested.
431
+ *
432
+ * If a copy is needed, returns 0
433
+ * If no copy is needed, returns 1 and fills `npy_intp *newstrides`
434
+ * with appropriate strides
435
+ */
436
+
437
+ NUMBA_EXPORT_FUNC(int)
438
+ numba_attempt_nocopy_reshape(npy_intp nd, const npy_intp *dims, const npy_intp *strides,
439
+ npy_intp newnd, const npy_intp *newdims,
440
+ npy_intp *newstrides, npy_intp itemsize,
441
+ int is_f_order)
442
+ {
443
+ int oldnd;
444
+ npy_intp olddims[NPY_MAXDIMS];
445
+ npy_intp oldstrides[NPY_MAXDIMS];
446
+ npy_intp np, op, last_stride;
447
+ int oi, oj, ok, ni, nj, nk;
448
+
449
+ oldnd = 0;
450
+ /*
451
+ * Remove axes with dimension 1 from the old array. They have no effect
452
+ * but would need special cases since their strides do not matter.
453
+ */
454
+ for (oi = 0; oi < nd; oi++) {
455
+ if (dims[oi]!= 1) {
456
+ olddims[oldnd] = dims[oi];
457
+ oldstrides[oldnd] = strides[oi];
458
+ oldnd++;
459
+ }
460
+ }
461
+
462
+ np = 1;
463
+ for (ni = 0; ni < newnd; ni++) {
464
+ np *= newdims[ni];
465
+ }
466
+ op = 1;
467
+ for (oi = 0; oi < oldnd; oi++) {
468
+ op *= olddims[oi];
469
+ }
470
+ if (np != op) {
471
+ /* different total sizes; no hope */
472
+ return 0;
473
+ }
474
+
475
+ if (np == 0) {
476
+ /* the Numpy code does not handle 0-sized arrays */
477
+ return nocopy_empty_reshape(nd, dims, strides,
478
+ newnd, newdims, newstrides,
479
+ itemsize, is_f_order);
480
+ }
481
+
482
+ /* oi to oj and ni to nj give the axis ranges currently worked with */
483
+ oi = 0;
484
+ oj = 1;
485
+ ni = 0;
486
+ nj = 1;
487
+ while (ni < newnd && oi < oldnd) {
488
+ np = newdims[ni];
489
+ op = olddims[oi];
490
+
491
+ while (np != op) {
492
+ if (np < op) {
493
+ /* Misses trailing 1s, these are handled later */
494
+ np *= newdims[nj++];
495
+ } else {
496
+ op *= olddims[oj++];
497
+ }
498
+ }
499
+
500
+ /* Check whether the original axes can be combined */
501
+ for (ok = oi; ok < oj - 1; ok++) {
502
+ if (is_f_order) {
503
+ if (oldstrides[ok+1] != olddims[ok]*oldstrides[ok]) {
504
+ /* not contiguous enough */
505
+ return 0;
506
+ }
507
+ }
508
+ else {
509
+ /* C order */
510
+ if (oldstrides[ok] != olddims[ok+1]*oldstrides[ok+1]) {
511
+ /* not contiguous enough */
512
+ return 0;
513
+ }
514
+ }
515
+ }
516
+
517
+ /* Calculate new strides for all axes currently worked with */
518
+ if (is_f_order) {
519
+ newstrides[ni] = oldstrides[oi];
520
+ for (nk = ni + 1; nk < nj; nk++) {
521
+ newstrides[nk] = newstrides[nk - 1]*newdims[nk - 1];
522
+ }
523
+ }
524
+ else {
525
+ /* C order */
526
+ newstrides[nj - 1] = oldstrides[oj - 1];
527
+ for (nk = nj - 1; nk > ni; nk--) {
528
+ newstrides[nk - 1] = newstrides[nk]*newdims[nk];
529
+ }
530
+ }
531
+ ni = nj++;
532
+ oi = oj++;
533
+ }
534
+
535
+ /*
536
+ * Set strides corresponding to trailing 1s of the new shape.
537
+ */
538
+ if (ni >= 1) {
539
+ last_stride = newstrides[ni - 1];
540
+ }
541
+ else {
542
+ last_stride = itemsize;
543
+ }
544
+ if (is_f_order) {
545
+ last_stride *= newdims[ni - 1];
546
+ }
547
+ for (nk = ni; nk < newnd; nk++) {
548
+ newstrides[nk] = last_stride;
549
+ }
550
+
551
+ return 1;
552
+ }
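
A concrete illustration of the routine above (not part of the diff, assumed to sit in the same translation unit): a C-contiguous (4, 6) array of 8-byte items has strides (48, 8); reshaping it to (2, 12) needs no copy, and the call fills the new strides with (96, 8).

/* Illustration only: no-copy reshape of a C-contiguous (4, 6) int64 array
 * into (2, 12). Expected: returns 1 and newstrides == {96, 8}. */
static void reshape_example(void)
{
    npy_intp dims[2]       = {4, 6};
    npy_intp strides[2]    = {48, 8};   /* C order, itemsize 8 */
    npy_intp newdims[2]    = {2, 12};
    npy_intp newstrides[2];
    int ok = numba_attempt_nocopy_reshape(2, dims, strides,
                                          2, newdims, newstrides,
                                          8, /* is_f_order */ 0);
    (void) ok;
}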
553
+
554
+ /*
555
+ * Cython utilities.
556
+ */
557
+
558
+ /* Fetch the address of the given function, as exposed by
559
+ a cython module */
560
+ static void *
561
+ import_cython_function(const char *module_name, const char *function_name)
562
+ {
563
+ PyObject *module, *capi, *cobj;
564
+ void *res = NULL;
565
+ const char *capsule_name;
566
+
567
+ module = PyImport_ImportModule(module_name);
568
+ if (module == NULL)
569
+ return NULL;
570
+ capi = PyObject_GetAttrString(module, "__pyx_capi__");
571
+ Py_DECREF(module);
572
+ if (capi == NULL)
573
+ return NULL;
574
+ cobj = PyMapping_GetItemString(capi, (char *)function_name);
575
+ Py_DECREF(capi);
576
+ if (cobj == NULL) {
577
+ PyErr_Clear();
578
+ PyErr_Format(PyExc_ValueError,
579
+ "No function '%s' found in __pyx_capi__ of '%s'",
580
+ function_name, module_name);
581
+ return NULL;
582
+ }
583
+ /* 2.7+ => Cython exports a PyCapsule */
584
+ capsule_name = PyCapsule_GetName(cobj);
585
+ if (capsule_name != NULL) {
586
+ res = PyCapsule_GetPointer(cobj, capsule_name);
587
+ }
588
+ Py_DECREF(cobj);
589
+ return res;
590
+ }
591
+
592
+ NUMBA_EXPORT_FUNC(PyObject *)
593
+ _numba_import_cython_function(PyObject *self, PyObject *args)
594
+ {
595
+ const char *module_name;
596
+ const char *function_name;
597
+ void *p = NULL;
598
+ PyObject *res;
599
+
600
+ if (!PyArg_ParseTuple(args, "ss", &module_name, &function_name)) {
601
+ return NULL;
602
+ }
603
+ p = import_cython_function(module_name, function_name);
604
+ if (p == NULL) {
605
+ return NULL;
606
+ }
607
+ res = PyLong_FromVoidPtr(p);
608
+ if (res == NULL) {
609
+ PyErr_SetString(PyExc_RuntimeError,
610
+ "Could not convert function address to int");
611
+ return NULL;
612
+ }
613
+ return res;
614
+ }
615
+
616
+ /* We use separate functions for datetime64 and timedelta64, to ensure
617
+ * proper type checking.
618
+ */
619
+ NUMBA_EXPORT_FUNC(npy_int64)
620
+ numba_extract_np_datetime(PyObject *td)
621
+ {
622
+ if (!PyArray_IsScalar(td, Datetime)) {
623
+ PyErr_SetString(PyExc_TypeError,
624
+ "expected a numpy.datetime64 object");
625
+ return -1;
626
+ }
627
+ return PyArrayScalar_VAL(td, Timedelta);
628
+ }
629
+
630
+ NUMBA_EXPORT_FUNC(npy_int64)
631
+ numba_extract_np_timedelta(PyObject *td)
632
+ {
633
+ if (!PyArray_IsScalar(td, Timedelta)) {
634
+ PyErr_SetString(PyExc_TypeError,
635
+ "expected a numpy.timedelta64 object");
636
+ return -1;
637
+ }
638
+ return PyArrayScalar_VAL(td, Timedelta);
639
+ }
640
+
641
+ NUMBA_EXPORT_FUNC(PyObject *)
642
+ numba_create_np_datetime(npy_int64 value, int unit_code)
643
+ {
644
+ PyDatetimeScalarObject *obj = (PyDatetimeScalarObject *)
645
+ PyArrayScalar_New(Datetime);
646
+ if (obj != NULL) {
647
+ obj->obval = value;
648
+ obj->obmeta.base = unit_code;
649
+ obj->obmeta.num = 1;
650
+ }
651
+ return (PyObject *) obj;
652
+ }
653
+
654
+ NUMBA_EXPORT_FUNC(PyObject *)
655
+ numba_create_np_timedelta(npy_int64 value, int unit_code)
656
+ {
657
+ PyTimedeltaScalarObject *obj = (PyTimedeltaScalarObject *)
658
+ PyArrayScalar_New(Timedelta);
659
+ if (obj != NULL) {
660
+ obj->obval = value;
661
+ obj->obmeta.base = unit_code;
662
+ obj->obmeta.num = 1;
663
+ }
664
+ return (PyObject *) obj;
665
+ }
666
+
667
+ NUMBA_EXPORT_FUNC(uint64_t)
668
+ numba_fptoui(double x) {
669
+ /* First cast to signed int of the full width to make sure sign extension
670
+ happens (this can make a difference on some platforms...). */
671
+ return (uint64_t) (int64_t) x;
672
+ }
673
+
674
+ NUMBA_EXPORT_FUNC(uint64_t)
675
+ numba_fptouif(float x) {
676
+ return (uint64_t) (int64_t) x;
677
+ }
678
+
679
+ NUMBA_EXPORT_FUNC(void)
680
+ numba_gil_ensure(PyGILState_STATE *state) {
681
+ *state = PyGILState_Ensure();
682
+ }
683
+
684
+ NUMBA_EXPORT_FUNC(void)
685
+ numba_gil_release(PyGILState_STATE *state) {
686
+ PyGILState_Release(*state);
687
+ }
688
+
689
+ NUMBA_EXPORT_FUNC(PyObject *)
690
+ numba_py_type(PyObject *obj) {
691
+ return (PyObject *) Py_TYPE(obj);
692
+ }
693
+
694
+
695
+ /*
696
+ * Functions for tagging an arbitrary Python object with an arbitrary pointer.
697
+ * These functions make strong lifetime assumptions, see below.
698
+ */
699
+
700
+ static PyObject *private_data_dict = NULL;
701
+
702
+ static PyObject *
703
+ _get_private_data_dict(void)
704
+ {
705
+ if (private_data_dict == NULL)
706
+ private_data_dict = PyDict_New();
707
+ return private_data_dict;
708
+ }
709
+
710
+ NUMBA_EXPORT_FUNC(void)
711
+ numba_set_pyobject_private_data(PyObject *obj, void *ptr)
712
+ {
713
+ PyObject *dct = _get_private_data_dict();
714
+ /* This assumes the reference to setobj is kept alive until the
715
+ call to numba_reset_set_private_data()! */
716
+ PyObject *key = PyLong_FromVoidPtr((void *) obj);
717
+ PyObject *value = PyLong_FromVoidPtr(ptr);
718
+
719
+ if (!dct || !value || !key)
720
+ goto error;
721
+ if (PyDict_SetItem(dct, key, value))
722
+ goto error;
723
+ Py_DECREF(key);
724
+ Py_DECREF(value);
725
+ return;
726
+
727
+ error:
728
+ Py_FatalError("unable to set private data");
729
+ }
730
+
731
+ NUMBA_EXPORT_FUNC(void *)
732
+ numba_get_pyobject_private_data(PyObject *obj)
733
+ {
734
+ PyObject *dct = _get_private_data_dict();
735
+ PyObject *value, *key = PyLong_FromVoidPtr((void *) obj);
736
+ void *ptr;
737
+ if (!dct || !key)
738
+ goto error;
739
+
740
+ value = PyDict_GetItem(dct, key);
741
+ Py_DECREF(key);
742
+ if (!value)
743
+ return NULL;
744
+ else {
745
+ ptr = PyLong_AsVoidPtr(value);
746
+ if (ptr == NULL && PyErr_Occurred())
747
+ goto error;
748
+ return ptr;
749
+ }
750
+
751
+ error:
752
+ Py_FatalError("unable to get private data");
753
+ return NULL;
754
+ }
755
+
756
+ NUMBA_EXPORT_FUNC(void)
757
+ numba_reset_pyobject_private_data(PyObject *obj)
758
+ {
759
+ PyObject *dct = _get_private_data_dict();
760
+ PyObject *key = PyLong_FromVoidPtr((void *) obj);
761
+
762
+ if (!key)
763
+ goto error;
764
+ if (PyDict_DelItem(dct, key))
765
+ PyErr_Clear();
766
+ Py_DECREF(key);
767
+ return;
768
+
769
+ error:
770
+ Py_FatalError("unable to reset private data");
771
+ }
772
+
773
+ NUMBA_EXPORT_FUNC(int)
774
+ numba_unpack_slice(PyObject *obj,
775
+ Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step)
776
+ {
777
+ PySliceObject *slice = (PySliceObject *) obj;
778
+ if (!PySlice_Check(obj)) {
779
+ PyErr_Format(PyExc_TypeError,
780
+ "Expected a slice object, got '%s'",
781
+ Py_TYPE(slice)->tp_name);
782
+ return -1;
783
+ }
784
+ #define FETCH_MEMBER(NAME, DEFAULT) \
785
+ if (slice->NAME != Py_None) { \
786
+ Py_ssize_t v = PyNumber_AsSsize_t(slice->NAME, \
787
+ PyExc_OverflowError); \
788
+ if (v == -1 && PyErr_Occurred()) \
789
+ return -1; \
790
+ *NAME = v; \
791
+ } \
792
+ else { \
793
+ *NAME = DEFAULT; \
794
+ }
795
+ FETCH_MEMBER(step, 1)
796
+ FETCH_MEMBER(stop, (*step > 0) ? PY_SSIZE_T_MAX : PY_SSIZE_T_MIN)
797
+ FETCH_MEMBER(start, (*step > 0) ? 0 : PY_SSIZE_T_MAX)
798
+ return 0;
799
+
800
+ #undef FETCH_MEMBER
801
+ }
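
For clarity (illustration only, not part of the diff), the FETCH_MEMBER(step, 1) line above expands to roughly the following:

/* Approximate expansion of FETCH_MEMBER(step, 1). */
if (slice->step != Py_None) {
    Py_ssize_t v = PyNumber_AsSsize_t(slice->step, PyExc_OverflowError);
    if (v == -1 && PyErr_Occurred())
        return -1;
    *step = v;
}
else {
    *step = 1;
}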
802
+
803
+ NUMBA_EXPORT_FUNC(int)
804
+ numba_fatal_error(void)
805
+ {
806
+ PyGILState_Ensure();
807
+ Py_FatalError("in Numba-compiled function");
808
+ return 0; /* unreachable */
809
+ }
810
+
811
+ /* Insert a frame into the traceback for (funcname, filename, lineno). */
812
+ /* This function is CPython's _PyTraceback_Add, renamed, see:
813
+ * https://github.com/python/cpython/blob/d545869d084e70d4838310e79b52a25a72a1ca56/Python/traceback.c#L246
814
+ * and modified for Python 2.x based on
815
+ * https://github.com/python/cpython/blob/2e1a34025cde19bddf12a2eac8fedb6afcca8339/Modules/_ctypes/callbacks.c#L151-L174
816
+ */
817
+ static void traceback_add(const char *funcname, const char *filename, int lineno)
818
+ {
819
+ PyObject *globals = NULL;
820
+ PyCodeObject *code = NULL;
821
+ PyFrameObject *frame = NULL;
822
+ PyObject *exc, *val, *tb;
823
+
824
+ /* Save and clear the current exception. Python functions must not be
825
+ called with an exception set. Calling Python functions happens when
826
+ the codec of the filesystem encoding is implemented in pure Python. */
827
+ PyErr_Fetch(&exc, &val, &tb);
828
+
829
+ globals = PyDict_New();
830
+ if (!globals)
831
+ goto error;
832
+ code = PyCode_NewEmpty(filename, funcname, lineno);
833
+ if (!code) {
834
+ goto error;
835
+ }
836
+ frame = PyFrame_New(PyThreadState_Get(), code, globals, NULL);
837
+ Py_DECREF(globals);
838
+ Py_DECREF(code);
839
+ if (!frame)
840
+ goto error;
841
+
842
+ #if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12) || (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 13) /* 3.12 or 3.13 */
843
+ #elif (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 11) /* 3.11 */
844
+
845
+ /* unsafe cast to our copy of _frame to access the f_lineno field */
846
+ typedef struct _frame py_frame;
847
+ py_frame* hacked_frame = (py_frame*)frame;
848
+ hacked_frame->f_lineno = lineno;
849
+
850
+ #elif (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION < 11) /* <3.11 */
851
+ frame->f_lineno = lineno;
852
+ #else
853
+ #error "Check if struct _frame has been changed in the new version"
854
+ #endif
855
+ PyErr_Restore(exc, val, tb);
856
+ PyTraceBack_Here(frame);
857
+ Py_DECREF(frame);
858
+ return;
859
+
860
+ #if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12) || (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 13) /* 3.12 or 3.13 */
861
+ error:
862
+ _PyErr_ChainExceptions1(exc);
863
+ #elif (PY_MAJOR_VERSION == 3) && ((PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11)) /* 3.11 and below */
864
+ error:
865
+ _PyErr_ChainExceptions(exc, val, tb);
866
+ #else
867
+ #error "Python major version is not supported."
868
+ #endif
869
+ }
870
+
871
+
872
+ /*
873
+ * Add traceback information to *loc* to the active exception.
874
+ * loc can be NULL, which causes this function to become a no-op.
875
+ */
876
+ static
877
+ void traceback_add_loc(PyObject *loc) {
878
+ const char *function_name_str = NULL, *filename_str = NULL;
879
+ PyObject *function_name = NULL, *filename = NULL, *lineno = NULL;
880
+ Py_ssize_t pos;
881
+
882
+ /* instance is instantiated/internal exception is raised, if loc is present
883
+ * add a frame for it into the traceback */
884
+ if(loc && loc != Py_None && PyTuple_Check(loc))
885
+ {
886
+ pos = 0;
887
+ function_name = PyTuple_GET_ITEM(loc, pos);
888
+ function_name_str = PyString_AsString(function_name);
889
+ pos = 1;
890
+ filename = PyTuple_GET_ITEM(loc, pos);
891
+ filename_str = PyString_AsString(filename);
892
+ pos = 2;
893
+ lineno = PyTuple_GET_ITEM(loc, pos);
894
+ traceback_add(function_name_str, filename_str, \
895
+ (int)PyLong_AsLong(lineno));
896
+ }
897
+ }
898
+
899
+ /**
900
+ * Re-raise the current active exception.
901
+ * Called internally by process_raise() when *exc* is None.
902
+ */
903
+ static
904
+ int reraise_exc_is_none(void) {
905
+ /* Reraise */
906
+ PyObject *tb, *type, *value;
907
+
908
+ #if (PY_MAJOR_VERSION >= 3) && (PY_MINOR_VERSION >= 11)
909
+ PyErr_GetExcInfo(&type, &value, &tb);
910
+ #elif (PY_MAJOR_VERSION >= 3) && (PY_MINOR_VERSION >= 10)
911
+ PyThreadState *tstate = PyThreadState_GET();
912
+ _PyErr_StackItem *tstate_exc = tstate->exc_info;
913
+ type = tstate_exc->exc_type;
914
+ value = tstate_exc->exc_value;
915
+ tb = tstate_exc->exc_traceback;
916
+ #endif
917
+ if (type == Py_None) {
918
+ PyErr_SetString(PyExc_RuntimeError,
919
+ "No active exception to reraise");
920
+ return 0;
921
+ }
922
+ /* incref needed because PyErr_Restore DOES NOT */
923
+ Py_XINCREF(type);
924
+ Py_XINCREF(value);
925
+ Py_XINCREF(tb);
926
+ PyErr_Restore(type, value, tb);
927
+ return 1;
928
+ }
929
+
930
+ /*
931
+ * Set exception given the Exception type and the constructor argument.
932
+ * Equivalent to ``raise exc(value)``.
933
+ * PyExceptionClass_Check(exc) must be True.
934
+ * value can be NULL.
935
+ */
936
+ static
937
+ int process_exception_class(PyObject *exc, PyObject *value) {
938
+ PyObject *type;
939
+ /* It is a class, type used here just as a tmp var */
940
+ type = PyObject_CallObject(exc, value);
941
+ if (type == NULL){
942
+ return 0;
943
+ }
944
+ if (!PyExceptionInstance_Check(type)) {
945
+ PyErr_SetString(PyExc_TypeError,
946
+ "exceptions must derive from BaseException");
947
+ Py_DECREF(type);
948
+ return 0;
949
+ }
950
+ /* all ok, set type to the exc */
951
+ Py_DECREF(type);
952
+ type = exc;
953
+ PyErr_SetObject(type, value);
954
+ return 1;
955
+ }
956
+
957
+ /*
958
+ * Internal routine to process exceptions.
959
+ * exc cannot be NULL. It can be a None, Exception type, or Exception instance.
960
+ * value can be NULL for absent, or any PyObject valid for the exception.
961
+ */
962
+ static
963
+ int process_raise(PyObject *exc, PyObject *value) {
964
+ /* exc is None */
965
+ if (exc == Py_None) {
966
+ return reraise_exc_is_none();
967
+ }
968
+ /* exc should be an exception class */
969
+ else if (PyExceptionClass_Check(exc)) {
970
+ return process_exception_class(exc, value);
971
+ }
972
+ /* exc is an instance of an Exception */
973
+ else if (PyExceptionInstance_Check(exc)) {
974
+ PyObject *type = PyExceptionInstance_Class(exc);
975
+ PyErr_SetObject(type, exc);
976
+ return 0;
977
+ }
978
+ else {
979
+ /* Not something you can raise. You get an exception
980
+ anyway, just not what you specified :-) */
981
+ PyErr_SetString(PyExc_TypeError,
982
+ "exceptions must derive from BaseException");
983
+ return 0;
984
+ }
985
+ }
986
+
987
+ /* Logic for raising an arbitrary object. Adapted from CPython's ceval.c.
988
+ This *consumes* a reference count to its argument. */
989
+ NUMBA_EXPORT_FUNC(int)
990
+ numba_do_raise(PyObject *exc_packed)
991
+ {
992
+ int status;
993
+ PyObject *exc = NULL, *value = NULL, *loc = NULL;
994
+
995
+ /* We support the following forms of raise:
996
+ raise
997
+ raise <instance>
998
+ raise <type> */
999
+
1000
+ /* could be a tuple from npm (some exc like thing, args, location) */
1001
+ if (PyTuple_CheckExact(exc_packed)) {
1002
+ /* Unpack a (class/inst/tuple, arguments, location) tuple. */
1003
+ if (!PyArg_ParseTuple(exc_packed, "OOO", &exc, &value, &loc)) {
1004
+ traceback_add_loc(loc);
1005
+ return 0;
1006
+ }
1007
+ } else {
1008
+ /* could be a reraise or an exception from objmode */
1009
+ exc = exc_packed;
1010
+ /* branch exit with value = NULL and loc = NULL */
1011
+ }
1012
+ /* value is either NULL or borrowed */
1013
+ status = process_raise(exc, value);
1014
+ traceback_add_loc(loc);
1015
+ Py_DECREF(exc_packed);
1016
+ return status;
1017
+ }
1018
+
1019
+ #ifdef PYCC_COMPILING
1020
+ /* AOT avoid the use of `numba.core.serialize` */
1021
+ NUMBA_EXPORT_FUNC(PyObject *)
1022
+ numba_unpickle(const char *data, int n, const char *hashed)
1023
+ {
1024
+ PyObject *buf, *obj;
1025
+ static PyObject *loads;
1026
+
1027
+ /* Caching the pickle.loads function shaves a couple µs here. */
1028
+ if (loads == NULL) {
1029
+ PyObject *picklemod;
1030
+ picklemod = PyImport_ImportModule("pickle");
1031
+ if (picklemod == NULL)
1032
+ return NULL;
1033
+ loads = PyObject_GetAttrString(picklemod, "loads");
1034
+ Py_DECREF(picklemod);
1035
+ if (loads == NULL)
1036
+ return NULL;
1037
+ }
1038
+
1039
+ buf = PyBytes_FromStringAndSize(data, n);
1040
+ if (buf == NULL)
1041
+ return NULL;
1042
+ obj = PyObject_CallFunctionObjArgs(loads, buf, NULL);
1043
+ Py_DECREF(buf);
1044
+ return obj;
1045
+ }
1046
+
1047
+ #else
1048
+
1049
+ NUMBA_EXPORT_FUNC(PyObject *)
1050
+ numba_unpickle(const char *data, int n, const char *hashed)
1051
+ {
1052
+ PyObject *buf=NULL, *obj=NULL, *addr=NULL, *hashedbuf=NULL;
1053
+ static PyObject *loads=NULL;
1054
+
1055
+ /* Caching the _numba_unpickle function shaves a couple µs here. */
1056
+ if (loads == NULL) {
1057
+ PyObject *picklemod;
1058
+ picklemod = PyImport_ImportModule("numba.core.serialize");
1059
+ if (picklemod == NULL)
1060
+ return NULL;
1061
+ loads = PyObject_GetAttrString(picklemod, "_numba_unpickle");
1062
+ Py_DECREF(picklemod);
1063
+ if (loads == NULL)
1064
+ return NULL;
1065
+ }
1066
+
1067
+ buf = PyBytes_FromStringAndSize(data, n);
1068
+ if (buf == NULL)
1069
+ return NULL;
1070
+ /* SHA1 produces 160 bit or 20 bytes */
1071
+ hashedbuf = PyBytes_FromStringAndSize(hashed, 20);
1072
+ if (hashedbuf == NULL)
1073
+ goto error;
1074
+ addr = PyLong_FromVoidPtr((void*)data);
1075
+ if (addr == NULL)
1076
+ goto error;
1077
+ obj = PyObject_CallFunctionObjArgs(loads, addr, buf, hashedbuf, NULL);
1078
+ error:
1079
+ Py_XDECREF(addr);
1080
+ Py_XDECREF(hashedbuf);
1081
+ Py_DECREF(buf);
1082
+ return obj;
1083
+ }
1084
+ #endif
1085
+
1086
+ NUMBA_EXPORT_FUNC(PyObject *)
1087
+ numba_runtime_build_excinfo_struct(PyObject* struct_gv, PyObject* exc_args)
1088
+ {
1089
+ PyObject *obj = NULL;
1090
+ static PyObject *func = NULL;
1091
+
1092
+ /* Caching the function shaves a couple µs here. */
1093
+ if (func == NULL)
1094
+ {
1095
+ PyObject *picklemod;
1096
+ picklemod = PyImport_ImportModule("numba.core.serialize");
1097
+ if (picklemod == NULL)
1098
+ return NULL;
1099
+ func = PyObject_GetAttrString(picklemod,
1100
+ "runtime_build_excinfo_struct");
1101
+ Py_DECREF(picklemod);
1102
+ if (func == NULL)
1103
+ return NULL;
1104
+ }
1105
+
1106
+ obj = PyObject_CallFunctionObjArgs(func, struct_gv, exc_args, NULL);
1107
+ // func returns None on failure (i.e. can't serialize one of the args).
1108
+ // Is there a better way to handle this? raise an exception here?
1109
+ return obj;
1110
+ }
1111
+
1112
+ /*
1113
+ * Unicode helpers
1114
+ */
1115
+
1116
+ /* Developer note:
1117
+ *
1118
+ * The hash value of unicode objects is obtained via:
1119
+ * ((PyASCIIObject *)(obj))->hash;
1120
+ * The use comes from this definition:
1121
+ * https://github.com/python/cpython/blob/6d43f6f081023b680d9db4542d19b9e382149f0a/Objects/unicodeobject.c#L119-L120
1122
+ * and it's used extensively throughout the `cpython/Object/unicodeobject.c`
1123
+ * source, not least in `unicode_hash` itself:
1124
+ * https://github.com/python/cpython/blob/6d43f6f081023b680d9db4542d19b9e382149f0a/Objects/unicodeobject.c#L11662-L11679
1125
+ *
1126
+ * The Unicode string struct layouts are described here:
1127
+ * https://github.com/python/cpython/blob/6d43f6f081023b680d9db4542d19b9e382149f0a/Include/cpython/unicodeobject.h#L82-L161
1128
+ * essentially, all the unicode string layouts start with a `PyASCIIObject` at
1129
+ * offset 0 (as of commit 6d43f6f081023b680d9db4542d19b9e382149f0a, somewhere
1130
+ * in the 3.8 development cycle).
1131
+ *
1132
+ * For safety against future CPython internal changes, the code checks that the
1133
+ * _base members of the unicode structs are what is expected in 3.7, and that
1134
+ * their offset is 0. It then walks the struct to the hash location to make sure
1135
+ * the offset is indeed the same as PyASCIIObject->hash.
1136
+ * Note: The large condition in the if should evaluate to a compile time
1137
+ * constant.
1138
+ */
1139
+
1140
+ #define MEMBER_SIZE(structure, member) sizeof(((structure *)0)->member)
1141
+
1142
+ NUMBA_EXPORT_FUNC(void *)
1143
+ numba_extract_unicode(PyObject *obj, Py_ssize_t *length, int *kind,
1144
+ unsigned int *ascii, Py_ssize_t *hash) {
1145
+ if (!PyUnicode_READY(obj)) {
1146
+ *length = PyUnicode_GET_LENGTH(obj);
1147
+ *kind = PyUnicode_KIND(obj);
1148
+ /* could also use PyUnicode_IS_ASCII but it is not publicly advertised in https://docs.python.org/3/c-api/unicode.html */
1149
+ *ascii = (unsigned int)(PyUnicode_MAX_CHAR_VALUE(obj) == (0x7f));
1150
+ /* this is here as a crude check for safe casting of all unicode string
1151
+ * structs to a PyASCIIObject */
1152
+ if (MEMBER_SIZE(PyCompactUnicodeObject, _base) == sizeof(PyASCIIObject) &&
1153
+ MEMBER_SIZE(PyUnicodeObject, _base) == sizeof(PyCompactUnicodeObject) &&
1154
+ offsetof(PyCompactUnicodeObject, _base) == 0 &&
1155
+ offsetof(PyUnicodeObject, _base) == 0 &&
1156
+ offsetof(PyCompactUnicodeObject, _base.hash) == offsetof(PyASCIIObject, hash) &&
1157
+ offsetof(PyUnicodeObject, _base._base.hash) == offsetof(PyASCIIObject, hash)
1158
+ ) {
1159
+ /* Grab the hash from the type object cache, do not compute it. */
1160
+ *hash = ((PyASCIIObject *)(obj))->hash;
1161
+ }
1162
+ else {
1163
+ /* cast is not safe, fail */
1164
+ return NULL;
1165
+ }
1166
+ return PyUnicode_DATA(obj);
1167
+ } else {
1168
+ return NULL;
1169
+ }
1170
+ }
1171
+
1172
+ /* this is late included as it #defines e.g. SHIFT that should not impact
1173
+ * the above */
1174
+ #include "_unicodetype_db.h"
1175
+
1176
+ /* This function is a modified copy of the private function gettyperecord from
1177
+ * CPython's Objects/unicodectype.c
1178
+ *
1179
+ * See:https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodectype.c#L45-L59
1180
+ */
1181
+ NUMBA_EXPORT_FUNC(void)
1182
+ numba_gettyperecord(Py_UCS4 code, int *upper, int *lower, int *title,
1183
+ unsigned char *decimal, unsigned char *digit,
1184
+ unsigned short *flags)
1185
+ {
1186
+ int index;
1187
+ const numba_PyUnicode_TypeRecord *rec;
1188
+
1189
+ if (code >= 0x110000)
1190
+ index = 0;
1191
+ else
1192
+ {
1193
+ index = index1[(code>>SHIFT)];
1194
+ index = index2[(index<<SHIFT)+(code&((1<<SHIFT)-1))];
1195
+ }
1196
+
1197
+ rec = &numba_PyUnicode_TypeRecords[index];
1198
+ *upper = rec->upper;
1199
+ *lower = rec->lower;
1200
+ *title = rec->title;
1201
+ *decimal = rec->decimal;
1202
+ *digit = rec->digit;
1203
+ *flags = rec->flags;
1204
+ }
1205
+
1206
+ /* This function provides a consistent access point for the
1207
+ * _PyUnicode_ExtendedCase array defined in CPython's Objects/unicodectype.c
1208
+ * and now also as numba_PyUnicode_ExtendedCase in Numba's _unicodetype_db.h
1209
+ */
1210
+ NUMBA_EXPORT_FUNC(Py_UCS4)
1211
+ numba_get_PyUnicode_ExtendedCase(int code)
1212
+ {
1213
+ return numba_PyUnicode_ExtendedCase[code];
1214
+ }
1215
+
1216
+ /* from _unicodetype_db.h */
1217
+ #undef SHIFT
1218
+
1219
+ /*
1220
+ * defined break point for gdb
1221
+ */
1222
+ NUMBA_EXPORT_FUNC(void)
1223
+ numba_gdb_breakpoint(void) {
1224
+ /* does nothing */
1225
+ }
1226
+
1227
+ /*
1228
+ * Define bridge for all math functions
1229
+ */
1230
+
1231
+ #define MATH_UNARY(F, R, A) \
1232
+ NUMBA_EXPORT_FUNC(R) numba_##F(A a) { return F(a); }
1233
+ #define MATH_BINARY(F, R, A, B) \
1234
+ NUMBA_EXPORT_FUNC(R) numba_##F(A a, B b) { return F(a, b); }
1235
+
1236
+ #include "mathnames.h"
1237
+
1238
+ #undef MATH_UNARY
1239
+ #undef MATH_BINARY
1240
+
1241
+ /*
1242
+ * BLAS and LAPACK wrappers
1243
+ */
1244
+
1245
+ #include "_lapack.c"
1246
+
1247
+ /*
1248
+ * PRNG support
1249
+ */
1250
+
1251
+ #include "_random.c"
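
The MATH_UNARY / MATH_BINARY bridge above stamps out one thin exported wrapper per entry of mathnames.h. As an illustration (not part of the diff, and assuming mathnames.h lists sin and atan2), the expansions look like:

/* Illustrative expansions only; the real entries live in mathnames.h. */
NUMBA_EXPORT_FUNC(double) numba_sin(double a) { return sin(a); }
NUMBA_EXPORT_FUNC(double) numba_atan2(double a, double b) { return atan2(a, b); }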
lib/python3.10/site-packages/numba/_helpermod.c ADDED
@@ -0,0 +1,277 @@
1
+ /*
2
+ Expose all functions as pointers in a dedicated C extension.
3
+ */
4
+ #include "cext/cext.h"
5
+ /* Import _pymodule.h first, for a recent _POSIX_C_SOURCE */
6
+ #include "_pymodule.h"
7
+
8
+ #include <math.h>
9
+ #ifdef _MSC_VER
10
+ #define false 0
11
+ #define true 1
12
+ #define bool int
13
+ #else
14
+ #include <stdbool.h>
15
+ #endif
16
+
17
+ /*
18
+ Include C-extension here
19
+ */
20
+ #include "cext/cext.h"
21
+
22
+ /* Numba C helpers */
23
+ #include "_helperlib.c"
24
+
25
+ static PyObject *
26
+ build_c_helpers_dict(void)
27
+ {
28
+ PyObject *dct = PyDict_New();
29
+ if (dct == NULL)
30
+ goto error;
31
+
32
+ #define _declpointer(name, value) do { \
33
+ PyObject *o = PyLong_FromVoidPtr(value); \
34
+ if (o == NULL) goto error; \
35
+ if (PyDict_SetItemString(dct, name, o)) { \
36
+ Py_DECREF(o); \
37
+ goto error; \
38
+ } \
39
+ Py_DECREF(o); \
40
+ } while (0)
41
+
42
+ #define declmethod(func) _declpointer(#func, &numba_##func)
43
+
44
+ #define declpointer(ptr) _declpointer(#ptr, &numba_##ptr)
45
+
46
+ declmethod(fixed_fmod);
47
+ declmethod(fixed_fmodf);
48
+ declmethod(set_fnclex);
49
+
50
+ declmethod(sdiv);
51
+ declmethod(srem);
52
+ declmethod(udiv);
53
+ declmethod(urem);
54
+ declmethod(frexp);
55
+ declmethod(frexpf);
56
+ declmethod(ldexp);
57
+ declmethod(ldexpf);
58
+ declmethod(cpow);
59
+ declmethod(cpowf);
60
+ declmethod(erf);
61
+ declmethod(erff);
62
+ declmethod(erfc);
63
+ declmethod(erfcf);
64
+ declmethod(gamma);
65
+ declmethod(gammaf);
66
+ declmethod(lgamma);
67
+ declmethod(lgammaf);
68
+ declmethod(nextafter);
69
+ declmethod(nextafterf);
70
+ declmethod(complex_adaptor);
71
+ declmethod(adapt_ndarray);
72
+ declmethod(ndarray_new);
73
+ declmethod(extract_record_data);
74
+ declmethod(get_buffer);
75
+ declmethod(adapt_buffer);
76
+ declmethod(release_buffer);
77
+ declmethod(extract_np_datetime);
78
+ declmethod(create_np_datetime);
79
+ declmethod(extract_np_timedelta);
80
+ declmethod(create_np_timedelta);
81
+ declmethod(recreate_record);
82
+ declmethod(fptoui);
83
+ declmethod(fptouif);
84
+ declmethod(gil_ensure);
85
+ declmethod(gil_release);
86
+ declmethod(fatal_error);
87
+ declmethod(py_type);
88
+ declmethod(unpack_slice);
89
+ declmethod(do_raise);
90
+ declmethod(unpickle);
91
+ declmethod(runtime_build_excinfo_struct);
92
+ declmethod(attempt_nocopy_reshape);
93
+ declmethod(get_pyobject_private_data);
94
+ declmethod(set_pyobject_private_data);
95
+ declmethod(reset_pyobject_private_data);
96
+
97
+ /* BLAS / LAPACK */
98
+ declmethod(xxgemm);
99
+ declmethod(xxgemv);
100
+ declmethod(xxdot);
101
+ declmethod(xxgetrf);
102
+ declmethod(ez_xxgetri);
103
+ declmethod(xxpotrf);
104
+ declmethod(ez_rgeev);
105
+ declmethod(ez_cgeev);
106
+ declmethod(ez_xxxevd);
107
+ declmethod(ez_gesdd);
108
+ declmethod(ez_geqrf);
109
+ declmethod(ez_xxgqr);
110
+ declmethod(ez_gelsd);
111
+ declmethod(xgesv);
112
+ declmethod(xxnrm2);
113
+
114
+ /* PRNG support */
115
+ declmethod(get_py_random_state);
116
+ declmethod(get_np_random_state);
117
+ declmethod(get_internal_random_state);
118
+ declmethod(rnd_shuffle);
119
+ declmethod(rnd_init);
120
+ declmethod(poisson_ptrs);
121
+
122
+ /* Unicode string support */
123
+ declmethod(extract_unicode);
124
+ declmethod(gettyperecord);
125
+ declmethod(get_PyUnicode_ExtendedCase);
126
+
127
+ /* for gdb breakpoint */
128
+ declmethod(gdb_breakpoint);
129
+
130
+ /* for dictionary support */
131
+ declmethod(test_dict);
132
+ declmethod(dict_new_sized);
133
+ declmethod(dict_set_method_table);
134
+ declmethod(dict_free);
135
+ declmethod(dict_length);
136
+ declmethod(dict_lookup);
137
+ declmethod(dict_insert);
138
+ declmethod(dict_insert_ez);
139
+ declmethod(dict_delitem);
140
+ declmethod(dict_popitem);
141
+ declmethod(dict_iter_sizeof);
142
+ declmethod(dict_iter);
143
+ declmethod(dict_iter_next);
144
+ declmethod(dict_dump);
145
+
146
+ /* for list support */
147
+ declmethod(test_list);
148
+ declmethod(list_new);
149
+ declmethod(list_set_method_table);
150
+ declmethod(list_free);
151
+ declmethod(list_base_ptr);
152
+ declmethod(list_size_address);
153
+ declmethod(list_length);
154
+ declmethod(list_allocated);
155
+ declmethod(list_is_mutable);
156
+ declmethod(list_set_is_mutable);
157
+ declmethod(list_setitem);
158
+ declmethod(list_getitem);
159
+ declmethod(list_append);
160
+ declmethod(list_delitem);
161
+ declmethod(list_delete_slice);
162
+ declmethod(list_iter_sizeof);
163
+ declmethod(list_iter);
164
+ declmethod(list_iter_next);
165
+
166
+ #define MATH_UNARY(F, R, A) declmethod(F);
167
+ #define MATH_BINARY(F, R, A, B) declmethod(F);
168
+ #include "mathnames.h"
169
+ #undef MATH_UNARY
170
+ #undef MATH_BINARY
171
+
172
+ #undef declmethod
173
+ return dct;
174
+ error:
175
+ Py_XDECREF(dct);
176
+ return NULL;
177
+ }
178
+
179
+
180
+ /*
181
+ * Helper to deal with flushing stdout
182
+ */
183
+ PyAPI_FUNC(void) _numba_flush_stdout(void) ;
184
+
185
+ void
186
+ _numba_flush_stdout(void) {
187
+ fflush(stdout);
188
+ }
189
+
190
+
191
+ static PyMethodDef ext_methods[] = {
192
+ { "rnd_get_state", (PyCFunction) _numba_rnd_get_state, METH_O, NULL },
193
+ { "rnd_get_py_state_ptr", (PyCFunction) _numba_rnd_get_py_state_ptr, METH_NOARGS, NULL },
194
+ { "rnd_get_np_state_ptr", (PyCFunction) _numba_rnd_get_np_state_ptr, METH_NOARGS, NULL },
195
+ { "rnd_seed", (PyCFunction) _numba_rnd_seed, METH_VARARGS, NULL },
196
+ { "rnd_set_state", (PyCFunction) _numba_rnd_set_state, METH_VARARGS, NULL },
197
+ { "rnd_shuffle", (PyCFunction) _numba_rnd_shuffle, METH_O, NULL },
198
+ { "_import_cython_function", (PyCFunction) _numba_import_cython_function, METH_VARARGS, NULL },
199
+ { NULL },
200
+ };
201
+
202
+ /*
203
+ * These functions are exported by the module's DLL, to exercise ctypes / cffi
204
+ * without relying on libc availability (see https://bugs.python.org/issue23606)
205
+ */
206
+
207
+ PyAPI_FUNC(double) _numba_test_sin(double x);
208
+ PyAPI_FUNC(double) _numba_test_cos(double x);
209
+ PyAPI_FUNC(double) _numba_test_exp(double x);
210
+ PyAPI_FUNC(void) _numba_test_vsquare(int n, double *x, double *out);
211
+ PyAPI_FUNC(double) _numba_test_funcptr(double (*func)(double));
212
+ PyAPI_FUNC(bool) _numba_test_boolean(void);
213
+
214
+ double _numba_test_sin(double x)
215
+ {
216
+ return sin(x);
217
+ }
218
+
219
+ double _numba_test_cos(double x)
220
+ {
221
+ return cos(x);
222
+ }
223
+
224
+ double _numba_test_exp(double x)
225
+ {
226
+ return exp(x);
227
+ }
228
+
229
+ void _numba_test_vsquare(int n, double *x, double *out)
230
+ {
231
+ int i;
232
+ for (i = 0; i < n; i++)
233
+ out[i] = pow(x[i], 2.0);
234
+ }
235
+
236
+ void _numba_test_vcube(int n, double *x, double *out)
237
+ {
238
+ int i;
239
+ for (i = 0; i < n; i++)
240
+ out[i] = pow(x[i], 3.0);
241
+ }
242
+
243
+ double _numba_test_funcptr(double (*func)(double))
244
+ {
245
+ return func(1.5);
246
+ }
247
+
248
+ bool _numba_test_boolean()
249
+ {
250
+ return true;
251
+ }
252
+
253
+ MOD_INIT(_helperlib) {
254
+ PyObject *m;
255
+ MOD_DEF(m, "_helperlib", "No docs", ext_methods)
256
+ if (m == NULL)
257
+ return MOD_ERROR_VAL;
258
+
259
+ import_array();
260
+
261
+ PyModule_AddObject(m, "c_helpers", build_c_helpers_dict());
262
+ PyModule_AddIntConstant(m, "long_min", LONG_MIN);
263
+ PyModule_AddIntConstant(m, "long_max", LONG_MAX);
264
+ PyModule_AddIntConstant(m, "py_buffer_size", sizeof(Py_buffer));
265
+ PyModule_AddIntConstant(m, "py_gil_state_size", sizeof(PyGILState_STATE));
266
+ PyModule_AddIntConstant(m, "py_unicode_1byte_kind", PyUnicode_1BYTE_KIND);
267
+ PyModule_AddIntConstant(m, "py_unicode_2byte_kind", PyUnicode_2BYTE_KIND);
268
+ PyModule_AddIntConstant(m, "py_unicode_4byte_kind", PyUnicode_4BYTE_KIND);
269
+ #if (PY_MAJOR_VERSION == 3)
270
+ #if ((PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11))
271
+ PyModule_AddIntConstant(m, "py_unicode_wchar_kind", PyUnicode_WCHAR_KIND);
272
+ #endif
273
+ #endif
274
+ numba_rnd_ensure_global_init();
275
+
276
+ return MOD_SUCCESS_VAL(m);
277
+ }
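
For reference (not part of the diff), declmethod(sdiv) in build_c_helpers_dict() above expands through _declpointer to roughly the block below; this is how every numba_* helper address ends up as a PyLong in the _helperlib.c_helpers dict.

/* Approximate expansion of declmethod(sdiv), illustration only. */
do {
    PyObject *o = PyLong_FromVoidPtr(&numba_sdiv);
    if (o == NULL) goto error;
    if (PyDict_SetItemString(dct, "sdiv", o)) {
        Py_DECREF(o);
        goto error;
    }
    Py_DECREF(o);
} while (0);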
lib/python3.10/site-packages/numba/_lapack.c ADDED
@@ -0,0 +1,1946 @@
1
+ /*
2
+ * This file contains wrappers of BLAS and LAPACK functions
3
+ */
4
+ /*
5
+ * BLAS calling helpers. The helpers can be called without the GIL held.
6
+ * The caller is responsible for checking arguments (especially dimensions).
7
+ */
8
+
9
+ /* Fast getters caching the value of a function's address after
10
+ the first call to import_cblas_function(). */
11
+
12
+ #define EMIT_GET_CBLAS_FUNC(name) \
13
+ static void *cblas_ ## name = NULL; \
14
+ static void *get_cblas_ ## name(void) { \
15
+ if (cblas_ ## name == NULL) { \
16
+ PyGILState_STATE st = PyGILState_Ensure(); \
17
+ const char *mod = "scipy.linalg.cython_blas"; \
18
+ cblas_ ## name = import_cython_function(mod, # name); \
19
+ PyGILState_Release(st); \
20
+ } \
21
+ return cblas_ ## name; \
22
+ }
23
+
24
+ EMIT_GET_CBLAS_FUNC(dgemm)
25
+ EMIT_GET_CBLAS_FUNC(sgemm)
26
+ EMIT_GET_CBLAS_FUNC(cgemm)
27
+ EMIT_GET_CBLAS_FUNC(zgemm)
28
+ EMIT_GET_CBLAS_FUNC(dgemv)
29
+ EMIT_GET_CBLAS_FUNC(sgemv)
30
+ EMIT_GET_CBLAS_FUNC(cgemv)
31
+ EMIT_GET_CBLAS_FUNC(zgemv)
32
+ EMIT_GET_CBLAS_FUNC(ddot)
33
+ EMIT_GET_CBLAS_FUNC(sdot)
34
+ EMIT_GET_CBLAS_FUNC(cdotu)
35
+ EMIT_GET_CBLAS_FUNC(zdotu)
36
+ EMIT_GET_CBLAS_FUNC(cdotc)
37
+ EMIT_GET_CBLAS_FUNC(zdotc)
38
+ EMIT_GET_CBLAS_FUNC(snrm2)
39
+ EMIT_GET_CBLAS_FUNC(dnrm2)
40
+ EMIT_GET_CBLAS_FUNC(scnrm2)
41
+ EMIT_GET_CBLAS_FUNC(dznrm2)
42
+
43
+
44
+ #undef EMIT_GET_CBLAS_FUNC
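For illustration only (an annotation, not part of the file): EMIT_GET_CBLAS_FUNC(dgemm) above expands to roughly the following lazily cached, GIL-protected lookup.

/* Sketch of the preprocessor output for EMIT_GET_CBLAS_FUNC(dgemm). */
static void *cblas_dgemm = NULL;
static void *get_cblas_dgemm(void) {
    if (cblas_dgemm == NULL) {
        PyGILState_STATE st = PyGILState_Ensure();   /* lookup touches the CPython API */
        const char *mod = "scipy.linalg.cython_blas";
        cblas_dgemm = import_cython_function(mod, "dgemm");
        PyGILState_Release(st);
    }
    return cblas_dgemm;
}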
45
+
46
+ /*
47
+ * NOTE: On return value convention.
48
+ * For LAPACK wrapper development the following conventions are followed:
49
+ * Publicly exposed wrapper functions must return:-
50
+ * STATUS_ERROR : For an unrecoverable error e.g. caught by xerbla, this is so
51
+ * a Py_FatalError can be raised.
52
+ * STATUS_SUCCESS: For successful execution
53
+ * +n : Where n is an integer for a routine specific error
54
+ * (typically derived from an `info` argument).
55
+ *
56
+ * The caller is responsible for checking and handling the error status.
57
+ */
58
+
59
+ /* return STATUS_SUCCESS if everything went ok */
60
+ #define STATUS_SUCCESS (0)
61
+
62
+ /* return STATUS_ERROR if an unrecoverable error is encountered */
63
+ #define STATUS_ERROR (-1)
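A minimal helper sketch (illustrative only, not part of the file; the name wrapper_call_ok is hypothetical) showing how a caller is expected to interpret this return convention:

/* Translate a wrapper's return value: 0 = success, -1 = unrecoverable
 * (a Python exception has already been set), +n = routine-specific info. */
static int wrapper_call_ok(int status, int *lapack_info)
{
    if (status == STATUS_ERROR)
        return 0;                 /* unrecoverable, exception already set */
    if (lapack_info != NULL)
        *lapack_info = status;    /* 0 on success, +n for routine-specific info */
    return 1;
}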
64
+
65
+ /*
66
+ * A union of all the types accepted by BLAS/LAPACK for use in cases where
67
+ * stack based allocation is needed (typically for length-1 work space query
68
+ * arguments).
69
+ */
70
+ typedef union all_dtypes_
71
+ {
72
+ float s;
73
+ double d;
74
+ npy_complex64 c;
75
+ npy_complex128 z;
76
+ } all_dtypes;
77
+
78
+ /*
79
+ * A checked PyMem_RawMalloc: ensures that either the allocation succeeded,
80
+ * or that the var is NULL and an exception has been raised.
81
+ * Returns zero on success for status checking.
82
+ */
83
+ static int checked_PyMem_RawMalloc(void** var, size_t bytes)
84
+ {
85
+ *var = NULL;
86
+ *var = PyMem_RawMalloc(bytes);
87
+ if (!(*var))
88
+ {
89
+ {
90
+ PyGILState_STATE st = PyGILState_Ensure();
91
+
92
+ PyErr_SetString(PyExc_MemoryError,
93
+ "Insufficient memory for buffer allocation\
94
+ required by LAPACK.");
95
+ PyGILState_Release(st);
96
+ }
97
+ return 1;
98
+ }
99
+ return 0;
100
+ }
101
+
102
+ /*
103
+ * Checks that the char kind is valid (one of [s,d,c,z]) for use in blas/lapack.
104
+ * Returns zero on success for status checking.
105
+ */
106
+ static int check_kind(char kind)
107
+ {
108
+ switch (kind)
109
+ {
110
+ case 's':
111
+ case 'd':
112
+ case 'c':
113
+ case 'z':
114
+ break;
115
+ default:
116
+ {
117
+ PyGILState_STATE st = PyGILState_Ensure();
118
+ PyErr_SetString(PyExc_ValueError,
119
+ "invalid data type (kind) found");
120
+ PyGILState_Release(st);
121
+ }
122
+ return 1;
123
+ }
124
+ return 0;
125
+ }
126
+
127
+ /*
128
+ * Guard macro for ensuring a valid data "kind" is being used.
129
+ * Place at the top of all routines with switches on "kind" that accept
130
+ * one of [s,d,c,z].
131
+ */
132
+ #define ENSURE_VALID_KIND(__KIND) \
133
+ if (check_kind( __KIND )) \
134
+ { \
135
+ return STATUS_ERROR; \
136
+ } \
137
+
138
+ /*
139
+ * Checks that the char kind is valid for the real domain (one of [s,d])
140
+ * for use in blas/lapack.
141
+ * Returns zero on success for status checking.
142
+ */
143
+ static int check_real_kind(char kind)
144
+ {
145
+ switch (kind)
146
+ {
147
+ case 's':
148
+ case 'd':
149
+ break;
150
+ default:
151
+ {
152
+ PyGILState_STATE st = PyGILState_Ensure();
153
+ PyErr_SetString(PyExc_ValueError,
154
+ "invalid data type (kind) found");
155
+ PyGILState_Release(st);
156
+ }
157
+ return 1;
158
+ }
159
+ return 0;
160
+ }
161
+
162
+ /*
163
+ * Guard macro for ensuring a valid data "kind" is being used for the
164
+ * real domain routines.
165
+ * Place at the top of all routines with switches on "kind" that accept
166
+ * one of [s,d].
167
+ */
168
+ #define ENSURE_VALID_REAL_KIND(__KIND) \
169
+ if (check_real_kind( __KIND )) \
170
+ { \
171
+ return STATUS_ERROR; \
172
+ } \
173
+
174
+
175
+ /*
176
+ * Checks that the char kind is valid for the complex domain (one of [c,z])
177
+ * for use in blas/lapack.
178
+ * Returns zero on success for status checking.
179
+ */
180
+ static int check_complex_kind(char kind)
181
+ {
182
+ switch (kind)
183
+ {
184
+ case 'c':
185
+ case 'z':
186
+ break;
187
+ default:
188
+ {
189
+ PyGILState_STATE st = PyGILState_Ensure();
190
+ PyErr_SetString(PyExc_ValueError,
191
+ "invalid data type (kind) found");
192
+ PyGILState_Release(st);
193
+ }
194
+ return 1;
195
+ }
196
+ return 0;
197
+ }
198
+
199
+ /*
200
+ * Guard macro for ensuring a valid data "kind" is being used for the
201
+ * complex domain routines.
202
+ * Place at the top of all routines with switches on "kind" that accept
203
+ * one of [c,z].
204
+ */
205
+ #define ENSURE_VALID_COMPLEX_KIND(__KIND) \
206
+ if (check_complex_kind( __KIND )) \
207
+ { \
208
+ return STATUS_ERROR; \
209
+ } \
210
+
211
+
212
+ /*
213
+ * Checks that a function is found (i.e. not null)
214
+ * Returns zero on success for status checking.
215
+ */
216
+ static int check_func(void *func)
217
+ {
218
+ if (func == NULL)
219
+ {
220
+ PyGILState_STATE st = PyGILState_Ensure();
221
+ PyErr_SetString(PyExc_RuntimeError,
222
+ "Specified LAPACK function could not be found.");
223
+ PyGILState_Release(st);
224
+ return STATUS_ERROR;
225
+ }
226
+ return STATUS_SUCCESS;
227
+ }
228
+
229
+
230
+ /*
231
+ * Guard macro for ensuring a valid function is found.
232
+ */
233
+ #define ENSURE_VALID_FUNC(__FUNC) \
234
+ if (check_func(__FUNC)) \
235
+ { \
236
+ return STATUS_ERROR; \
237
+ } \
238
+
239
+
240
+ /*
241
+ * Define what a Fortran "int" is; some LAPACKs have 64-bit integer support,
242
+ * but numba presently opts for a 32-bit C int.
243
+ * This definition allows scope for later configuration time magic to adjust
244
+ * the size of int at all the call sites.
245
+ */
246
+ #define F_INT int
247
+
248
+
249
+ typedef float (*sdot_t)(F_INT *n, void *dx, F_INT *incx, void *dy, F_INT *incy);
250
+ typedef double (*ddot_t)(F_INT *n, void *dx, F_INT *incx, void *dy, F_INT
251
+ *incy);
252
+ typedef npy_complex64 (*cdot_t)(F_INT *n, void *dx, F_INT *incx, void *dy,
253
+ F_INT *incy);
254
+ typedef npy_complex128 (*zdot_t)(F_INT *n, void *dx, F_INT *incx, void *dy,
255
+ F_INT *incy);
256
+
257
+ typedef void (*xxgemv_t)(char *trans, F_INT *m, F_INT *n,
258
+ void *alpha, void *a, F_INT *lda,
259
+ void *x, F_INT *incx, void *beta,
260
+ void *y, F_INT *incy);
261
+
262
+ typedef void (*xxgemm_t)(char *transa, char *transb,
263
+ F_INT *m, F_INT *n, F_INT *k,
264
+ void *alpha, void *a, F_INT *lda,
265
+ void *b, F_INT *ldb, void *beta,
266
+ void *c, F_INT *ldc);
267
+
268
+ typedef float (*sxnrm2_t) (F_INT *n, void *x, F_INT *incx);
269
+ typedef double (*dxnrm2_t) (F_INT *n, void *x, F_INT *incx);
270
+
271
+ /* Vector * vector: result = dx * dy */
272
+ NUMBA_EXPORT_FUNC(int)
273
+ numba_xxdot(char kind, char conjugate, Py_ssize_t n, void *dx, void *dy,
274
+ void *result)
275
+ {
276
+ void *raw_func = NULL;
277
+ F_INT _n;
278
+ F_INT inc = 1;
279
+
280
+ ENSURE_VALID_KIND(kind)
281
+
282
+ switch (kind)
283
+ {
284
+ case 's':
285
+ raw_func = get_cblas_sdot();
286
+ break;
287
+ case 'd':
288
+ raw_func = get_cblas_ddot();
289
+ break;
290
+ case 'c':
291
+ raw_func = conjugate ? get_cblas_cdotc() : get_cblas_cdotu();
292
+ break;
293
+ case 'z':
294
+ raw_func = conjugate ? get_cblas_zdotc() : get_cblas_zdotu();
295
+ break;
296
+ }
297
+ ENSURE_VALID_FUNC(raw_func)
298
+
299
+ _n = (F_INT) n;
300
+
301
+ switch (kind)
302
+ {
303
+ case 's':
304
+ *(float *) result = (*(sdot_t) raw_func)(&_n, dx, &inc, dy, &inc);
305
+ break;
306
+ case 'd':
307
+ *(double *) result = (*(ddot_t) raw_func)(&_n, dx, &inc, dy, &inc);
308
+ break;
309
+ case 'c':
310
+ *(npy_complex64 *) result = (*(cdot_t) raw_func)(&_n, dx, &inc, dy,\
311
+ &inc);
312
+ break;
313
+ case 'z':
314
+ *(npy_complex128 *) result = (*(zdot_t) raw_func)(&_n, dx, &inc,\
315
+ dy, &inc);
316
+ break;
317
+ }
318
+
319
+ return 0;
320
+ }
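A hypothetical caller sketch (an annotation, not part of the diff; demo_ddot is an illustrative name): a double-precision dot product computed through numba_xxdot, assuming scipy.linalg.cython_blas is importable so the cached getter can resolve ddot.

/* dx . dy for two length-3 double vectors; conjugate is irrelevant for 'd'. */
static double demo_ddot(void)
{
    double dx[3] = {1.0, 2.0, 3.0};
    double dy[3] = {4.0, 5.0, 6.0};
    double result = 0.0;
    if (numba_xxdot('d', 0, 3, dx, dy, &result) != 0)
        return -1.0;    /* an exception has already been set by the wrapper */
    return result;      /* 32.0 */
}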
321
+
322
+ /* Matrix * vector: y = alpha * a * x + beta * y */
323
+ NUMBA_EXPORT_FUNC(int)
324
+ numba_xxgemv(char kind, char trans, Py_ssize_t m, Py_ssize_t n,
325
+ void *alpha, void *a, Py_ssize_t lda,
326
+ void *x, void *beta, void *y)
327
+ {
328
+ void *raw_func = NULL;
329
+ F_INT _m, _n;
330
+ F_INT _lda;
331
+ F_INT inc = 1;
332
+
333
+ ENSURE_VALID_KIND(kind)
334
+
335
+ switch (kind)
336
+ {
337
+ case 's':
338
+ raw_func = get_cblas_sgemv();
339
+ break;
340
+ case 'd':
341
+ raw_func = get_cblas_dgemv();
342
+ break;
343
+ case 'c':
344
+ raw_func = get_cblas_cgemv();
345
+ break;
346
+ case 'z':
347
+ raw_func = get_cblas_zgemv();
348
+ break;
349
+ }
350
+ ENSURE_VALID_FUNC(raw_func)
351
+
352
+ _m = (F_INT) m;
353
+ _n = (F_INT) n;
354
+ _lda = (F_INT) lda;
355
+
356
+ (*(xxgemv_t) raw_func)(&trans, &_m, &_n, alpha, a, &_lda,
357
+ x, &inc, beta, y, &inc);
358
+ return 0;
359
+ }
360
+
361
+ /* Matrix * matrix: c = alpha * a * b + beta * c */
362
+ NUMBA_EXPORT_FUNC(int)
363
+ numba_xxgemm(char kind, char transa, char transb,
364
+ Py_ssize_t m, Py_ssize_t n, Py_ssize_t k,
365
+ void *alpha, void *a, Py_ssize_t lda,
366
+ void *b, Py_ssize_t ldb, void *beta,
367
+ void *c, Py_ssize_t ldc)
368
+ {
369
+ void *raw_func = NULL;
370
+ F_INT _m, _n, _k;
371
+ F_INT _lda, _ldb, _ldc;
372
+
373
+ ENSURE_VALID_KIND(kind)
374
+
375
+ switch (kind)
376
+ {
377
+ case 's':
378
+ raw_func = get_cblas_sgemm();
379
+ break;
380
+ case 'd':
381
+ raw_func = get_cblas_dgemm();
382
+ break;
383
+ case 'c':
384
+ raw_func = get_cblas_cgemm();
385
+ break;
386
+ case 'z':
387
+ raw_func = get_cblas_zgemm();
388
+ break;
389
+ }
390
+ ENSURE_VALID_FUNC(raw_func)
391
+
392
+ _m = (F_INT) m;
393
+ _n = (F_INT) n;
394
+ _k = (F_INT) k;
395
+ _lda = (F_INT) lda;
396
+ _ldb = (F_INT) ldb;
397
+ _ldc = (F_INT) ldc;
398
+
399
+ (*(xxgemm_t) raw_func)(&transa, &transb, &_m, &_n, &_k, alpha, a, &_lda,
400
+ b, &_ldb, beta, c, &_ldc);
401
+ return 0;
402
+ }
403
+
404
+
405
+ /* L2-norms */
406
+ NUMBA_EXPORT_FUNC(F_INT)
407
+ numba_xxnrm2(char kind, Py_ssize_t n, void * x, Py_ssize_t incx, void * result)
408
+ {
409
+ void *raw_func = NULL;
410
+ F_INT _incx;
411
+ F_INT _n;
412
+
413
+ ENSURE_VALID_KIND(kind)
414
+
415
+ switch (kind)
416
+ {
417
+ case 's':
418
+ raw_func = get_cblas_snrm2();
419
+ break;
420
+ case 'd':
421
+ raw_func = get_cblas_dnrm2();
422
+ break;
423
+ case 'c':
424
+ raw_func = get_cblas_scnrm2();
425
+ break;
426
+ case 'z':
427
+ raw_func = get_cblas_dznrm2();
428
+ break;
429
+ }
430
+ ENSURE_VALID_FUNC(raw_func)
431
+
432
+ _n = (F_INT) n;
433
+ _incx = (F_INT) incx;
434
+
435
+ switch (kind)
436
+ {
437
+ case 's':
438
+ *(float *) result = (*(sxnrm2_t) raw_func)(&_n, x, &_incx);
439
+ break;
440
+ case 'd':
441
+ *(double *) result = (*(dxnrm2_t) raw_func)(&_n, x, &_incx);
442
+ break;
443
+ case 'c':
444
+ *(float *) result = (*(sxnrm2_t) raw_func)(&_n, x, &_incx);
445
+ break;
446
+ case 'z':
447
+ *(double *) result = (*(dxnrm2_t) raw_func)(&_n, x, &_incx);
448
+ break;
449
+ }
450
+
451
+ return 0;
452
+ }
453
+
454
+
455
+ /*
456
+ * LAPACK calling helpers. The helpers can be called without the GIL held.
457
+ * The caller is responsible for checking arguments (especially dimensions).
458
+ */
459
+
460
+ /* Fast getters caching the value of a function's address after
461
+ the first call to import_clapack_function(). */
462
+
463
+ #define EMIT_GET_CLAPACK_FUNC(name) \
464
+ static void *clapack_ ## name = NULL; \
465
+ static void *get_clapack_ ## name(void) { \
466
+ if (clapack_ ## name == NULL) { \
467
+ PyGILState_STATE st = PyGILState_Ensure(); \
468
+ const char *mod = "scipy.linalg.cython_lapack"; \
469
+ clapack_ ## name = import_cython_function(mod, # name); \
470
+ PyGILState_Release(st); \
471
+ } \
472
+ return clapack_ ## name; \
473
+ }
474
+
475
+ /* Computes an LU factorization of a general M-by-N matrix A
476
+ * using partial pivoting with row interchanges.
477
+ */
478
+ EMIT_GET_CLAPACK_FUNC(sgetrf)
479
+ EMIT_GET_CLAPACK_FUNC(dgetrf)
480
+ EMIT_GET_CLAPACK_FUNC(cgetrf)
481
+ EMIT_GET_CLAPACK_FUNC(zgetrf)
482
+
483
+ /* Computes the inverse of a matrix using the LU factorization
484
+ * computed by xGETRF.
485
+ */
486
+ EMIT_GET_CLAPACK_FUNC(sgetri)
487
+ EMIT_GET_CLAPACK_FUNC(dgetri)
488
+ EMIT_GET_CLAPACK_FUNC(cgetri)
489
+ EMIT_GET_CLAPACK_FUNC(zgetri)
490
+
491
+ /* Compute Cholesky factorizations */
492
+ EMIT_GET_CLAPACK_FUNC(spotrf)
493
+ EMIT_GET_CLAPACK_FUNC(dpotrf)
494
+ EMIT_GET_CLAPACK_FUNC(cpotrf)
495
+ EMIT_GET_CLAPACK_FUNC(zpotrf)
496
+
497
+ /* Computes for an N-by-N real nonsymmetric matrix A, the
498
+ * eigenvalues and, optionally, the left and/or right eigenvectors.
499
+ */
500
+ EMIT_GET_CLAPACK_FUNC(sgeev)
501
+ EMIT_GET_CLAPACK_FUNC(dgeev)
502
+ EMIT_GET_CLAPACK_FUNC(cgeev)
503
+ EMIT_GET_CLAPACK_FUNC(zgeev)
504
+
505
+ /* Computes for an N-by-N Hermitian matrix A, the
506
+ * eigenvalues and, optionally, the left and/or right eigenvectors.
507
+ */
508
+ EMIT_GET_CLAPACK_FUNC(ssyevd)
509
+ EMIT_GET_CLAPACK_FUNC(dsyevd)
510
+ EMIT_GET_CLAPACK_FUNC(cheevd)
511
+ EMIT_GET_CLAPACK_FUNC(zheevd)
512
+
513
+ /* Computes generalised SVD */
514
+ EMIT_GET_CLAPACK_FUNC(sgesdd)
515
+ EMIT_GET_CLAPACK_FUNC(dgesdd)
516
+ EMIT_GET_CLAPACK_FUNC(cgesdd)
517
+ EMIT_GET_CLAPACK_FUNC(zgesdd)
518
+
519
+ /* Computes QR decompositions */
520
+ EMIT_GET_CLAPACK_FUNC(sgeqrf)
521
+ EMIT_GET_CLAPACK_FUNC(dgeqrf)
522
+ EMIT_GET_CLAPACK_FUNC(cgeqrf)
523
+ EMIT_GET_CLAPACK_FUNC(zgeqrf)
524
+
525
+ /* Computes columns of Q from elementary reflectors produced by xgeqrf() (QR).
526
+ */
527
+ EMIT_GET_CLAPACK_FUNC(sorgqr)
528
+ EMIT_GET_CLAPACK_FUNC(dorgqr)
529
+ EMIT_GET_CLAPACK_FUNC(cungqr)
530
+ EMIT_GET_CLAPACK_FUNC(zungqr)
531
+
532
+ /* Computes the minimum norm solution to linear least squares problems */
533
+ EMIT_GET_CLAPACK_FUNC(sgelsd)
534
+ EMIT_GET_CLAPACK_FUNC(dgelsd)
535
+ EMIT_GET_CLAPACK_FUNC(cgelsd)
536
+ EMIT_GET_CLAPACK_FUNC(zgelsd)
537
+
538
+ /* Computes the solution to a system of linear equations */
539
+ EMIT_GET_CLAPACK_FUNC(sgesv)
540
+ EMIT_GET_CLAPACK_FUNC(dgesv)
541
+ EMIT_GET_CLAPACK_FUNC(cgesv)
542
+ EMIT_GET_CLAPACK_FUNC(zgesv)
543
+
544
+
545
+ #undef EMIT_GET_CLAPACK_FUNC
546
+
547
+ typedef void (*xxgetrf_t)(F_INT *m, F_INT *n, void *a, F_INT *lda, F_INT *ipiv,
548
+ F_INT *info);
549
+
550
+ typedef void (*xxgetri_t)(F_INT *n, void *a, F_INT *lda, F_INT *ipiv, void
551
+ *work, F_INT *lwork, F_INT *info);
552
+
553
+ typedef void (*xxpotrf_t)(char *uplo, F_INT *n, void *a, F_INT *lda, F_INT
554
+ *info);
555
+
556
+ typedef void (*rgeev_t)(char *jobvl, char *jobvr, F_INT *n, void *a, F_INT *lda,
557
+ void *wr, void *wi, void *vl, F_INT *ldvl, void *vr,
558
+ F_INT *ldvr, void *work, F_INT *lwork, F_INT *info);
559
+
560
+ typedef void (*cgeev_t)(char *jobvl, char *jobvr, F_INT *n, void *a, F_INT
561
+ *lda, void *w, void *vl, F_INT *ldvl, void *vr,
562
+ F_INT *ldvr, void *work, F_INT *lwork, void *rwork,
563
+ F_INT *info);
564
+
565
+ typedef void (*rgesdd_t)(char *jobz, F_INT *m, F_INT *n, void *a, F_INT *lda,
566
+ void *s, void *u, F_INT *ldu, void *vt, F_INT *ldvt,
567
+ void *work, F_INT *lwork, F_INT *iwork, F_INT *info);
568
+
569
+ typedef void (*cgesdd_t)(char *jobz, F_INT *m, F_INT *n, void *a, F_INT *lda,
570
+ void *s, void * u, F_INT *ldu, void * vt, F_INT *ldvt,
571
+ void *work, F_INT *lwork, void *rwork, F_INT *iwork,
572
+ F_INT *info);
573
+
574
+ typedef void (*xsyevd_t)(char *jobz, char *uplo, F_INT *n, void *a, F_INT *lda,
575
+ void *w, void *work, F_INT *lwork, F_INT *iwork,
576
+ F_INT *liwork, F_INT *info);
577
+
578
+ typedef void (*xheevd_t)(char *jobz, char *uplo, F_INT *n, void *a, F_INT *lda,
579
+ void *w, void *work, F_INT *lwork, void *rwork,
580
+ F_INT *lrwork, F_INT *iwork, F_INT *liwork,
581
+ F_INT *info);
582
+
583
+ typedef void (*xgeqrf_t)(F_INT *m, F_INT *n, void *a, F_INT *lda, void *tau,
584
+ void *work, F_INT *lwork, F_INT *info);
585
+
586
+ typedef void (*xxxgqr_t)(F_INT *m, F_INT *n, F_INT *k, void *a, F_INT *lda,
587
+ void *tau, void *work, F_INT *lwork, F_INT *info);
588
+
589
+ typedef void (*rgelsd_t)(F_INT *m, F_INT *n, F_INT *nrhs, void *a, F_INT *lda,
590
+ void *b, F_INT *ldb, void *s, void *rcond, F_INT *rank,
591
+ void *work, F_INT *lwork, F_INT *iwork, F_INT *info);
592
+
593
+ typedef void (*cgelsd_t)(F_INT *m, F_INT *n, F_INT *nrhs, void *a, F_INT *lda,
594
+ void *b, F_INT *ldb, void *s, void *rcond, F_INT *rank,
595
+ void *work, F_INT *lwork, void *rwork, F_INT *iwork,
596
+ F_INT *info);
597
+
598
+ typedef void (*xgesv_t)(F_INT *n, F_INT *nrhs, void *a, F_INT *lda, F_INT *ipiv,
599
+ void *b, F_INT *ldb, F_INT *info);
600
+
601
+
602
+
603
+ /*
604
+ * kind_size()
605
+ * gets the data size appropriate for a specified kind.
606
+ *
607
+ * Input:
608
+ * kind - the kind, one of:
609
+ * (s, d, c, z) = (float, double, complex, double complex).
610
+ *
611
+ * Returns:
612
+ * data_size - the appropriate data size.
613
+ *
614
+ */
615
+ static size_t kind_size(char kind)
616
+ {
617
+ size_t data_size = 0;
618
+ switch (kind)
619
+ {
620
+ case 's':
621
+ data_size = sizeof(float);
622
+ break;
623
+ case 'd':
624
+ data_size = sizeof(double);
625
+ break;
626
+ case 'c':
627
+ data_size = sizeof(npy_complex64);
628
+ break;
629
+ case 'z':
630
+ data_size = sizeof(npy_complex128);
631
+ break;
632
+ }
633
+ return data_size;
634
+
635
+ }
636
+
637
+ /*
638
+ * underlying_float_kind()
639
+ * gets the underlying float kind for a given kind.
640
+ *
641
+ * Input:
642
+ * kind - the kind, one of:
643
+ * (s, d, c, z) = (float, double, complex, double complex).
644
+ *
645
+ * Returns:
646
+ * underlying_float_kind - the underlying float kind, one of:
647
+ * (s, d) = (float, double).
648
+ *
649
+ * This function essentially provides a map between the char kind
650
+ * of a type and the char kind of the underlying float used in the
651
+ * type. Essentially:
652
+ * ---------------
653
+ * Input -> Output
654
+ * ---------------
655
+ * s -> s
656
+ * d -> d
657
+ * c -> s
658
+ * z -> d
659
+ * ---------------
660
+ *
661
+ */
662
+ static char underlying_float_kind(char kind)
663
+ {
664
+ switch(kind)
665
+ {
666
+ case 's':
667
+ case 'c':
668
+ return 's';
669
+ case 'd':
670
+ case 'z':
671
+ return 'd';
672
+ default:
673
+ {
674
+ PyGILState_STATE st = PyGILState_Ensure();
675
+ PyErr_SetString(PyExc_ValueError,
676
+ "invalid kind in underlying_float_kind()");
677
+ PyGILState_Release(st);
678
+ }
679
+ }
680
+ return -1;
681
+ }
682
+
683
+ /*
684
+ * cast_from_X()
685
+ * cast from a kind (s, d, c, z) = (float, double, complex, double complex)
686
+ * to a Fortran integer.
687
+ *
688
+ * Parameters:
689
+ * kind the kind of val
690
+ * val a pointer to the value to cast
691
+ *
692
+ * Returns:
693
+ * A Fortran int from a cast of val (in complex case, takes the real part).
694
+ *
695
+ * Struct access via non c99 (python only) cmplx types, used for compatibility.
696
+ */
697
+ static F_INT
698
+ cast_from_X(char kind, void *val)
699
+ {
700
+ switch(kind)
701
+ {
702
+ case 's':
703
+ return (F_INT)(*((float *) val));
704
+ case 'd':
705
+ return (F_INT)(*((double *) val));
706
+ case 'c':
707
+ return (F_INT)crealf(*((_complex_float_t *)val));
708
+ case 'z':
709
+ return (F_INT)creal(*((_complex_double_t *)val));
710
+ default:
711
+ {
712
+ PyGILState_STATE st = PyGILState_Ensure();
713
+ PyErr_SetString(PyExc_ValueError,
714
+ "invalid kind in cast");
715
+ PyGILState_Release(st);
716
+ }
717
+ }
718
+ return -1;
719
+ }
720
+
721
+
722
+ #define CATCH_LAPACK_INVALID_ARG(__routine, info) \
723
+ do { \
724
+ if (info < 0) { \
725
+ PyGILState_STATE st = PyGILState_Ensure(); \
726
+ PyErr_Format(PyExc_RuntimeError, \
727
+ "LAPACK Error: Routine " #__routine ". On input %d\n",\
728
+ -(int) info); \
729
+ PyGILState_Release(st); \
730
+ return STATUS_ERROR; \
731
+ } \
732
+ } while(0)
733
+
734
+ /* Compute LU decomposition of A
735
+ * NOTE: ipiv is an array of Fortran integers allocated by the caller,
736
+ * which is therefore expected to use the right dtype.
737
+ */
738
+ NUMBA_EXPORT_FUNC(int)
739
+ numba_xxgetrf(char kind, Py_ssize_t m, Py_ssize_t n, void *a, Py_ssize_t lda,
740
+ F_INT *ipiv)
741
+ {
742
+ void *raw_func = NULL;
743
+ F_INT _m, _n, _lda, info;
744
+
745
+ ENSURE_VALID_KIND(kind)
746
+
747
+ switch (kind)
748
+ {
749
+ case 's':
750
+ raw_func = get_clapack_sgetrf();
751
+ break;
752
+ case 'd':
753
+ raw_func = get_clapack_dgetrf();
754
+ break;
755
+ case 'c':
756
+ raw_func = get_clapack_cgetrf();
757
+ break;
758
+ case 'z':
759
+ raw_func = get_clapack_zgetrf();
760
+ break;
761
+ }
762
+ ENSURE_VALID_FUNC(raw_func)
763
+
764
+ _m = (F_INT) m;
765
+ _n = (F_INT) n;
766
+ _lda = (F_INT) lda;
767
+
768
+ (*(xxgetrf_t) raw_func)(&_m, &_n, a, &_lda, ipiv, &info);
769
+ CATCH_LAPACK_INVALID_ARG("xxgetrf", info);
770
+
771
+ return (int)info;
772
+ }
773
+
774
+ /* Compute the inverse of a matrix given its LU decomposition
775
+ * Args are as per LAPACK.
776
+ */
777
+ static int
778
+ numba_raw_xxgetri(char kind, F_INT n, void *a, F_INT lda,
779
+ F_INT *ipiv, void *work, F_INT *lwork, F_INT *info)
780
+ {
781
+ void *raw_func = NULL;
782
+
783
+ ENSURE_VALID_KIND(kind)
784
+
785
+ switch (kind)
786
+ {
787
+ case 's':
788
+ raw_func = get_clapack_sgetri();
789
+ break;
790
+ case 'd':
791
+ raw_func = get_clapack_dgetri();
792
+ break;
793
+ case 'c':
794
+ raw_func = get_clapack_cgetri();
795
+ break;
796
+ case 'z':
797
+ raw_func = get_clapack_zgetri();
798
+ break;
799
+ }
800
+ ENSURE_VALID_FUNC(raw_func)
801
+
802
+ (*(xxgetri_t) raw_func)(&n, a, &lda, ipiv, work, lwork, info);
803
+
804
+ return 0;
805
+ }
806
+
807
+ /* Compute the inverse of a matrix from the factorization provided by
808
+ * xxgetrf. (see numba_xxgetrf() about ipiv)
809
+ * Args are as per LAPACK.
810
+ */
811
+ NUMBA_EXPORT_FUNC(int)
812
+ numba_ez_xxgetri(char kind, Py_ssize_t n, void *a, Py_ssize_t lda,
813
+ F_INT *ipiv)
814
+ {
815
+ F_INT _n, _lda;
816
+ F_INT lwork = -1;
817
+ F_INT info = 0;
818
+ size_t base_size = -1;
819
+ void * work = NULL;
820
+ all_dtypes stack_slot;
821
+
822
+ ENSURE_VALID_KIND(kind)
823
+
824
+ _n = (F_INT)n;
825
+ _lda = (F_INT)lda;
826
+
827
+ base_size = kind_size(kind);
828
+
829
+ work = &stack_slot;
830
+
831
+ numba_raw_xxgetri(kind, _n, a, _lda, ipiv, work, &lwork, &info);
832
+ CATCH_LAPACK_INVALID_ARG("xxgetri", info);
833
+
834
+ lwork = cast_from_X(kind, work);
835
+
836
+ if (checked_PyMem_RawMalloc(&work, base_size * lwork))
837
+ {
838
+ return STATUS_ERROR;
839
+ }
840
+
841
+ numba_raw_xxgetri(kind, _n, a, _lda, ipiv, work, &lwork, &info);
842
+ PyMem_RawFree(work);
843
+ CATCH_LAPACK_INVALID_ARG("xxgetri", info);
844
+
845
+ return (int)info;
846
+ }
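A hypothetical caller sketch (an annotation, not part of the diff; demo_inverse_2x2 is an illustrative name): inverting a small column-major matrix in place by chaining the two wrappers above. ipiv is caller-allocated as plain C ints to match F_INT.

/* Invert [[4, 7], [2, 6]] (column-major) via LU factorization + getri. */
static int demo_inverse_2x2(void)
{
    double a[4] = {4.0, 2.0, 7.0, 6.0};   /* column-major storage */
    int ipiv[2];
    int info = numba_xxgetrf('d', 2, 2, a, 2, ipiv);
    if (info != 0)
        return info;   /* -1: unrecoverable (exception set); >0: singular pivot */
    return numba_ez_xxgetri('d', 2, a, 2, ipiv);   /* 0 -> a now holds the inverse */
}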
847
+
848
+ /* Compute the Cholesky factorization of a matrix. */
849
+ NUMBA_EXPORT_FUNC(int)
850
+ numba_xxpotrf(char kind, char uplo, Py_ssize_t n, void *a, Py_ssize_t lda)
851
+ {
852
+ void *raw_func = NULL;
853
+ F_INT _n, _lda, info;
854
+
855
+ ENSURE_VALID_KIND(kind)
856
+
857
+ switch (kind)
858
+ {
859
+ case 's':
860
+ raw_func = get_clapack_spotrf();
861
+ break;
862
+ case 'd':
863
+ raw_func = get_clapack_dpotrf();
864
+ break;
865
+ case 'c':
866
+ raw_func = get_clapack_cpotrf();
867
+ break;
868
+ case 'z':
869
+ raw_func = get_clapack_zpotrf();
870
+ break;
871
+ }
872
+ ENSURE_VALID_FUNC(raw_func)
873
+
874
+ _n = (F_INT) n;
875
+ _lda = (F_INT) lda;
876
+
877
+ (*(xxpotrf_t) raw_func)(&uplo, &_n, a, &_lda, &info);
878
+ CATCH_LAPACK_INVALID_ARG("xxpotrf", info);
879
+ return (int)info;
880
+ }
881
+
882
+
883
+ /* real space eigen systems info from dgeev/sgeev */
884
+ static int
885
+ numba_raw_rgeev(char kind, char jobvl, char jobvr,
886
+ Py_ssize_t n, void *a, Py_ssize_t lda, void *wr, void *wi,
887
+ void *vl, Py_ssize_t ldvl, void *vr, Py_ssize_t ldvr,
888
+ void *work, Py_ssize_t lwork, F_INT *info)
889
+ {
890
+ void *raw_func = NULL;
891
+ F_INT _n, _lda, _ldvl, _ldvr, _lwork;
892
+
893
+ ENSURE_VALID_REAL_KIND(kind)
894
+
895
+ switch (kind)
896
+ {
897
+ case 's':
898
+ raw_func = get_clapack_sgeev();
899
+ break;
900
+ case 'd':
901
+ raw_func = get_clapack_dgeev();
902
+ break;
903
+ }
904
+ ENSURE_VALID_FUNC(raw_func)
905
+
906
+ _n = (F_INT) n;
907
+ _lda = (F_INT) lda;
908
+ _ldvl = (F_INT) ldvl;
909
+ _ldvr = (F_INT) ldvr;
910
+ _lwork = (F_INT) lwork;
911
+
912
+ (*(rgeev_t) raw_func)(&jobvl, &jobvr, &_n, a, &_lda, wr, wi, vl, &_ldvl, vr,
913
+ &_ldvr, work, &_lwork, info);
914
+ return 0;
915
+ }
916
+
917
+ /* Real space eigen systems info from dgeev/sgeev
918
+ * as numba_raw_rgeev but the allocation and error handling is done for the user.
919
+ * Args are as per LAPACK.
920
+ */
921
+ NUMBA_EXPORT_FUNC(int)
922
+ numba_ez_rgeev(char kind, char jobvl, char jobvr, Py_ssize_t n, void *a,
923
+ Py_ssize_t lda, void *wr, void *wi, void *vl, Py_ssize_t ldvl,
924
+ void *vr, Py_ssize_t ldvr)
925
+ {
926
+ F_INT info = 0;
927
+ F_INT lwork = -1;
928
+ F_INT _n, _lda, _ldvl, _ldvr;
929
+ size_t base_size = -1;
930
+ void * work = NULL;
931
+ all_dtypes stack_slot;
932
+
933
+ ENSURE_VALID_REAL_KIND(kind)
934
+
935
+ _n = (F_INT) n;
936
+ _lda = (F_INT) lda;
937
+ _ldvl = (F_INT) ldvl;
938
+ _ldvr = (F_INT) ldvr;
939
+
940
+ base_size = kind_size(kind);
941
+
942
+ work = &stack_slot;
943
+ numba_raw_rgeev(kind, jobvl, jobvr, _n, a, _lda, wr, wi, vl, _ldvl,
944
+ vr, _ldvr, work, lwork, &info);
945
+ CATCH_LAPACK_INVALID_ARG("numba_raw_rgeev", info);
946
+
947
+ lwork = cast_from_X(kind, work);
948
+ if (checked_PyMem_RawMalloc(&work, base_size * lwork))
949
+ {
950
+ return STATUS_ERROR;
951
+ }
952
+ numba_raw_rgeev(kind, jobvl, jobvr, _n, a, _lda, wr, wi, vl, _ldvl,
953
+ vr, _ldvr, work, lwork, &info);
954
+ PyMem_RawFree(work);
955
+
956
+ CATCH_LAPACK_INVALID_ARG("numba_raw_rgeev", info);
957
+
958
+ return (int)info;
959
+ }
960
+
961
+ /* Complex space eigen systems info from cgeev/zgeev
962
+ * Args are as per LAPACK.
963
+ */
964
+ static int
965
+ numba_raw_cgeev(char kind, char jobvl, char jobvr,
966
+ Py_ssize_t n, void *a, Py_ssize_t lda, void *w, void *vl,
967
+ Py_ssize_t ldvl, void *vr, Py_ssize_t ldvr, void *work,
968
+ Py_ssize_t lwork, void *rwork, F_INT *info)
969
+ {
970
+ void *raw_func = NULL;
971
+ F_INT _n, _lda, _ldvl, _ldvr, _lwork;
972
+
973
+ ENSURE_VALID_COMPLEX_KIND(kind)
974
+
975
+ _n = (F_INT) n;
976
+ _lda = (F_INT) lda;
977
+ _ldvl = (F_INT) ldvl;
978
+ _ldvr = (F_INT) ldvr;
979
+ _lwork = (F_INT) lwork;
980
+
981
+ switch (kind)
982
+ {
983
+ case 'c':
984
+ raw_func = get_clapack_cgeev();
985
+ break;
986
+ case 'z':
987
+ raw_func = get_clapack_zgeev();
988
+ break;
989
+ }
990
+ ENSURE_VALID_FUNC(raw_func)
991
+
992
+ (*(cgeev_t) raw_func)(&jobvl, &jobvr, &_n, a, &_lda, w, vl, &_ldvl, vr,
993
+ &_ldvr, work, &_lwork, rwork, info);
994
+ return 0;
995
+ }
996
+
997
+
998
+ /* Complex space eigen systems info from cgeev/zgeev
999
+ * as numba_raw_cgeev but the allocation and error handling is done for the user.
1000
+ * Args are as per LAPACK.
1001
+ */
1002
+ NUMBA_EXPORT_FUNC(int)
1003
+ numba_ez_cgeev(char kind, char jobvl, char jobvr, Py_ssize_t n, void *a,
1004
+ Py_ssize_t lda, void *w, void *vl, Py_ssize_t ldvl, void *vr,
1005
+ Py_ssize_t ldvr)
1006
+ {
1007
+ F_INT info = 0;
1008
+ F_INT lwork = -1;
1009
+ F_INT _n, _lda, _ldvl, _ldvr;
1010
+ size_t base_size = -1;
1011
+ all_dtypes stack_slot, wk;
1012
+ void * work = NULL;
1013
+ void * rwork = (void *)&wk;
1014
+
1015
+ ENSURE_VALID_COMPLEX_KIND(kind)
1016
+
1017
+ _n = (F_INT) n;
1018
+ _lda = (F_INT) lda;
1019
+ _ldvl = (F_INT) ldvl;
1020
+ _ldvr = (F_INT) ldvr;
1021
+
1022
+ base_size = kind_size(kind);
1023
+
1024
+ work = &stack_slot;
1025
+ numba_raw_cgeev(kind, jobvl, jobvr, n, a, lda, w, vl, ldvl,
1026
+ vr, ldvr, work, lwork, rwork, &info);
1027
+ CATCH_LAPACK_INVALID_ARG("numba_raw_cgeev", info);
1028
+
1029
+ lwork = cast_from_X(kind, work);
1030
+ if (checked_PyMem_RawMalloc((void**)&rwork, 2*n*base_size))
1031
+ {
1032
+ return STATUS_ERROR;
1033
+ }
1034
+ if (checked_PyMem_RawMalloc(&work, base_size * lwork))
1035
+ {
1036
+ PyMem_RawFree(rwork);
1037
+ return STATUS_ERROR;
1038
+ }
1039
+ numba_raw_cgeev(kind, jobvl, jobvr, _n, a, _lda, w, vl, _ldvl,
1040
+ vr, _ldvr, work, lwork, rwork, &info);
1041
+ PyMem_RawFree(work);
1042
+ PyMem_RawFree(rwork);
1043
+ CATCH_LAPACK_INVALID_ARG("numba_raw_cgeev", info);
1044
+
1045
+ return (int)info;
1046
+ }
1047
+
1048
+ /* real space symmetric eigen systems info from ssyevd/dsyevd */
1049
+ static int
1050
+ numba_raw_rsyevd(char kind, char jobz, char uplo, Py_ssize_t n, void *a,
1051
+ Py_ssize_t lda, void *w, void *work, Py_ssize_t lwork,
1052
+ F_INT *iwork, Py_ssize_t liwork, F_INT *info)
1053
+ {
1054
+ void *raw_func = NULL;
1055
+ F_INT _n, _lda, _lwork, _liwork;
1056
+
1057
+ ENSURE_VALID_REAL_KIND(kind)
1058
+
1059
+ switch (kind)
1060
+ {
1061
+ case 's':
1062
+ raw_func = get_clapack_ssyevd();
1063
+ break;
1064
+ case 'd':
1065
+ raw_func = get_clapack_dsyevd();
1066
+ break;
1067
+ }
1068
+ ENSURE_VALID_FUNC(raw_func)
1069
+
1070
+ _n = (F_INT) n;
1071
+ _lda = (F_INT) lda;
1072
+ _lwork = (F_INT) lwork;
1073
+ _liwork = (F_INT) liwork;
1074
+
1075
+ (*(xsyevd_t) raw_func)(&jobz, &uplo, &_n, a, &_lda, w, work, &_lwork, iwork, &_liwork, info);
1076
+ return 0;
1077
+ }
1078
+
1079
+ /* Real space eigen systems info from dsyevd/ssyevd
1080
+ * as numba_raw_rsyevd but the allocation and error handling is done for the user.
1081
+ * Args are as per LAPACK.
1082
+ */
1083
+ static int
1084
+ numba_ez_rsyevd(char kind, char jobz, char uplo, Py_ssize_t n, void *a, Py_ssize_t lda, void *w)
1085
+ {
1086
+ F_INT info = 0;
1087
+ F_INT lwork = -1, liwork=-1;
1088
+ F_INT _n, _lda;
1089
+ size_t base_size = -1;
1090
+ void *work = NULL;
1091
+ F_INT *iwork = NULL;
1092
+ all_dtypes stack_slot;
1093
+ int stack_int = -1;
1094
+
1095
+ ENSURE_VALID_REAL_KIND(kind)
1096
+
1097
+ _n = (F_INT) n;
1098
+ _lda = (F_INT) lda;
1099
+
1100
+ base_size = kind_size(kind);
1101
+
1102
+ work = &stack_slot;
1103
+ iwork = &stack_int;
1104
+ numba_raw_rsyevd(kind, jobz, uplo, _n, a, _lda, w, work, lwork, iwork, liwork, &info);
1105
+ CATCH_LAPACK_INVALID_ARG("numba_raw_rsyevd", info);
1106
+
1107
+ lwork = cast_from_X(kind, work);
1108
+ if (checked_PyMem_RawMalloc(&work, base_size * lwork))
1109
+ {
1110
+ return STATUS_ERROR;
1111
+ }
1112
+ liwork = *iwork;
1113
+ if (checked_PyMem_RawMalloc((void**)&iwork, base_size * liwork))
1114
+ {
1115
+ PyMem_RawFree(work);
1116
+ return STATUS_ERROR;
1117
+ }
1118
+ numba_raw_rsyevd(kind, jobz, uplo, _n, a, _lda, w, work, lwork, iwork, liwork, &info);
1119
+ PyMem_RawFree(work);
1120
+ PyMem_RawFree(iwork);
1121
+
1122
+ CATCH_LAPACK_INVALID_ARG("numba_raw_rsyevd", info);
1123
+
1124
+ return (int)info;
1125
+ }
1126
+
1127
+
1128
+ /* complex space Hermitian eigen systems info from cheevd/zheevd */
1129
+ static int
1130
+ numba_raw_cheevd(char kind, char jobz, char uplo, Py_ssize_t n, void *a,
1131
+ Py_ssize_t lda, void *w, void *work, Py_ssize_t lwork,
1132
+ void *rwork, Py_ssize_t lrwork, F_INT *iwork,
1133
+ Py_ssize_t liwork, F_INT *info)
1134
+ {
1135
+ void *raw_func = NULL;
1136
+ F_INT _n, _lda, _lwork, _lrwork, _liwork;
1137
+
1138
+ ENSURE_VALID_COMPLEX_KIND(kind)
1139
+
1140
+ switch (kind)
1141
+ {
1142
+ case 'c':
1143
+ raw_func = get_clapack_cheevd();
1144
+ break;
1145
+ case 'z':
1146
+ raw_func = get_clapack_zheevd();
1147
+ break;
1148
+ }
1149
+ ENSURE_VALID_FUNC(raw_func)
1150
+
1151
+ _n = (F_INT) n;
1152
+ _lda = (F_INT) lda;
1153
+ _lwork = (F_INT) lwork;
1154
+ _lrwork = (F_INT) lrwork;
1155
+ _liwork = (F_INT) liwork;
1156
+
1157
+ (*(xheevd_t) raw_func)(&jobz, &uplo, &_n, a, &_lda, w, work, &_lwork, rwork, &_lrwork, iwork, &_liwork, info);
1158
+ return 0;
1159
+ }
1160
+
1161
+ /* complex space eigen systems info from cheevd/zheevd
1162
+ * as numba_raw_cheevd but the allocation and error handling is done for the user.
1163
+ * Args are as per LAPACK.
1164
+ */
1165
+ static int
1166
+ numba_ez_cheevd(char kind, char jobz, char uplo, Py_ssize_t n, void *a, Py_ssize_t lda, void *w)
1167
+ {
1168
+ F_INT info = 0;
1169
+ F_INT lwork = -1, lrwork = -1, liwork=-1;
1170
+ F_INT _n, _lda;
1171
+ size_t base_size = -1, underlying_float_size = -1;
1172
+ void *work = NULL, *rwork = NULL;
1173
+ F_INT *iwork = NULL;
1174
+ all_dtypes stack_slot1, stack_slot2;
1175
+ char uf_kind;
1176
+ int stack_int = -1;
1177
+
1178
+ ENSURE_VALID_COMPLEX_KIND(kind)
1179
+
1180
+ _n = (F_INT) n;
1181
+ _lda = (F_INT) lda;
1182
+
1183
+ base_size = kind_size(kind);
1184
+ uf_kind = underlying_float_kind(kind);
1185
+ underlying_float_size = kind_size(uf_kind);
1186
+
1187
+ work = &stack_slot1;
1188
+ rwork = &stack_slot2;
1189
+ iwork = &stack_int;
1190
+ numba_raw_cheevd(kind, jobz, uplo, _n, a, _lda, w, work, lwork, rwork, lrwork, iwork, liwork, &info);
1191
+ CATCH_LAPACK_INVALID_ARG("numba_raw_cheevd", info);
1192
+
1193
+ lwork = cast_from_X(uf_kind, work);
1194
+ if (checked_PyMem_RawMalloc(&work, base_size * lwork))
1195
+ {
1196
+ return STATUS_ERROR;
1197
+ }
1198
+
1199
+ lrwork = cast_from_X(uf_kind, rwork);
1200
+ if (checked_PyMem_RawMalloc(&rwork, underlying_float_size * lrwork))
1201
+ {
1202
+ PyMem_RawFree(work);
1203
+ return STATUS_ERROR;
1204
+ }
1205
+
1206
+ liwork = *iwork;
1207
+ if (checked_PyMem_RawMalloc((void**)&iwork, base_size * liwork))
1208
+ {
1209
+ PyMem_RawFree(work);
1210
+ PyMem_RawFree(rwork);
1211
+ return STATUS_ERROR;
1212
+ }
1213
+ numba_raw_cheevd(kind, jobz, uplo, _n, a, _lda, w, work, lwork, rwork, lrwork, iwork, liwork, &info);
1214
+ PyMem_RawFree(work);
1215
+ PyMem_RawFree(rwork);
1216
+ PyMem_RawFree(iwork);
1217
+
1218
+ CATCH_LAPACK_INVALID_ARG("numba_raw_cheevd", info);
1219
+
1220
+ return (int)info;
1221
+ }
1222
+
1223
+ /* Hermitian eigenvalue systems info from *syevd and *heevd.
1224
+ * This routine hides the type and general complexity involved with making the
1225
+ * calls. The work space computation and error handling etc is hidden.
1226
+ * Args are as per LAPACK.
1227
+ */
1228
+ NUMBA_EXPORT_FUNC(int)
1229
+ numba_ez_xxxevd(char kind, char jobz, char uplo, Py_ssize_t n, void *a, Py_ssize_t lda, void *w)
1230
+ {
1231
+ ENSURE_VALID_KIND(kind)
1232
+
1233
+ switch (kind)
1234
+ {
1235
+ case 's':
1236
+ case 'd':
1237
+ return numba_ez_rsyevd(kind, jobz, uplo, n, a, lda, w);
1238
+ case 'c':
1239
+ case 'z':
1240
+ return numba_ez_cheevd(kind, jobz, uplo, n, a, lda, w);
1241
+ }
1242
+ return STATUS_ERROR; /* unreachable */
1243
+ }
1244
+
1245
+ /* Real space svd systems info from dgesdd/sgesdd
1246
+ * Args are as per LAPACK.
1247
+ */
1248
+ static int
1249
+ numba_raw_rgesdd(char kind, char jobz, Py_ssize_t m, Py_ssize_t n, void *a,
1250
+ Py_ssize_t lda, void *s, void *u, Py_ssize_t ldu, void *vt,
1251
+ Py_ssize_t ldvt, void *work, Py_ssize_t lwork,
1252
+ F_INT *iwork, F_INT *info)
1253
+ {
1254
+ void *raw_func = NULL;
1255
+ F_INT _m, _n, _lda, _ldu, _ldvt, _lwork;
1256
+
1257
+ ENSURE_VALID_REAL_KIND(kind)
1258
+
1259
+ _m = (F_INT) m;
1260
+ _n = (F_INT) n;
1261
+ _lda = (F_INT) lda;
1262
+ _ldu = (F_INT) ldu;
1263
+ _ldvt = (F_INT) ldvt;
1264
+ _lwork = (F_INT) lwork;
1265
+
1266
+ switch (kind)
1267
+ {
1268
+ case 's':
1269
+ raw_func = get_clapack_sgesdd();
1270
+ break;
1271
+ case 'd':
1272
+ raw_func = get_clapack_dgesdd();
1273
+ break;
1274
+ }
1275
+ ENSURE_VALID_FUNC(raw_func)
1276
+
1277
+ (*(rgesdd_t) raw_func)(&jobz, &_m, &_n, a, &_lda, s, u, &_ldu, vt, &_ldvt,
1278
+ work, &_lwork, iwork, info);
1279
+ return 0;
1280
+ }
1281
+
1282
+ /* Real space svd info from dgesdd/sgesdd.
1283
+ * As numba_raw_rgesdd but the allocation and error handling is done for the
1284
+ * user.
1285
+ * Args are as per LAPACK.
1286
+ */
1287
+ static int
1288
+ numba_ez_rgesdd(char kind, char jobz, Py_ssize_t m, Py_ssize_t n, void *a,
1289
+ Py_ssize_t lda, void *s, void *u, Py_ssize_t ldu, void *vt,
1290
+ Py_ssize_t ldvt)
1291
+ {
1292
+ F_INT info = 0;
1293
+ Py_ssize_t minmn = -1;
1294
+ Py_ssize_t lwork = -1;
1295
+ all_dtypes stack_slot, wk;
1296
+ size_t base_size = -1;
1297
+ F_INT *iwork = (F_INT *)&wk;
1298
+ void *work = NULL;
1299
+
1300
+ ENSURE_VALID_REAL_KIND(kind)
1301
+
1302
+ base_size = kind_size(kind);
1303
+
1304
+ work = &stack_slot;
1305
+
1306
+ /* Compute optimal work size (lwork) */
1307
+ numba_raw_rgesdd(kind, jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work,
1308
+ lwork, iwork, &info);
1309
+ CATCH_LAPACK_INVALID_ARG("numba_raw_rgesdd", info);
1310
+
1311
+ /* Allocate work array */
1312
+ lwork = cast_from_X(kind, work);
1313
+ if (checked_PyMem_RawMalloc(&work, base_size * lwork))
1314
+ return -1;
1315
+ minmn = m > n ? n : m;
1316
+ if (checked_PyMem_RawMalloc((void**) &iwork, 8 * minmn * sizeof(F_INT)))
1317
+ {
1318
+ PyMem_RawFree(work);
1319
+ return STATUS_ERROR;
1320
+ }
1321
+ numba_raw_rgesdd(kind, jobz, m, n, a, lda, s, u ,ldu, vt, ldvt, work, lwork,
1322
+ iwork, &info);
1323
+ PyMem_RawFree(work);
1324
+ PyMem_RawFree(iwork);
1325
+ CATCH_LAPACK_INVALID_ARG("numba_raw_rgesdd", info);
1326
+
1327
+ return (int)info;
1328
+ }
1329
+
1330
+ /* Complex space svd systems info from cgesdd/zgesdd
1331
+ * Args are as per LAPACK.
1332
+ */
1333
+ static int
1334
+ numba_raw_cgesdd(char kind, char jobz, Py_ssize_t m, Py_ssize_t n, void *a,
1335
+ Py_ssize_t lda, void *s, void *u, Py_ssize_t ldu, void *vt,
1336
+ Py_ssize_t ldvt, void *work, Py_ssize_t lwork, void *rwork,
1337
+ F_INT *iwork, F_INT *info)
1338
+ {
1339
+ void *raw_func = NULL;
1340
+ F_INT _m, _n, _lda, _ldu, _ldvt, _lwork;
1341
+
1342
+ ENSURE_VALID_COMPLEX_KIND(kind)
1343
+
1344
+ _m = (F_INT) m;
1345
+ _n = (F_INT) n;
1346
+ _lda = (F_INT) lda;
1347
+ _ldu = (F_INT) ldu;
1348
+ _ldvt = (F_INT) ldvt;
1349
+ _lwork = (F_INT) lwork;
1350
+
1351
+ switch (kind)
1352
+ {
1353
+ case 'c':
1354
+ raw_func = get_clapack_cgesdd();
1355
+ break;
1356
+ case 'z':
1357
+ raw_func = get_clapack_zgesdd();
1358
+ break;
1359
+ }
1360
+ ENSURE_VALID_FUNC(raw_func)
1361
+
1362
+ (*(cgesdd_t) raw_func)(&jobz, &_m, &_n, a, &_lda, s, u, &_ldu, vt, &_ldvt,
1363
+ work, &_lwork, rwork, iwork, info);
1364
+ return 0;
1365
+ }
1366
+
1367
+ /* complex space svd info from cgesdd/zgesdd.
1368
+ * As numba_raw_cgesdd but the allocation and error handling is done for the
1369
+ * user.
1370
+ * Args are as per LAPACK.
1371
+ */
1372
+ static int
1373
+ numba_ez_cgesdd(char kind, char jobz, Py_ssize_t m, Py_ssize_t n, void *a,
1374
+ Py_ssize_t lda, void *s, void *u, Py_ssize_t ldu, void *vt,
1375
+ Py_ssize_t ldvt)
1376
+ {
1377
+ F_INT info = 0;
1378
+ Py_ssize_t lwork = -1;
1379
+ Py_ssize_t lrwork = -1;
1380
+ Py_ssize_t minmn = -1;
1381
+ Py_ssize_t tmp1, tmp2;
1382
+ Py_ssize_t maxmn = -1;
1383
+ size_t real_base_size = -1;
1384
+ size_t complex_base_size = -1;
1385
+ all_dtypes stack_slot, wk1, wk2;
1386
+ void *work = NULL;
1387
+ void *rwork = (void *)&wk1;
1388
+ F_INT *iwork = (F_INT *)&wk2;
1389
+
1390
+ ENSURE_VALID_COMPLEX_KIND(kind)
1391
+
1392
+ switch (kind)
1393
+ {
1394
+ case 'c':
1395
+ real_base_size = sizeof(float);
1396
+ complex_base_size = sizeof(npy_complex64);
1397
+ break;
1398
+ case 'z':
1399
+ real_base_size = sizeof(double);
1400
+ complex_base_size = sizeof(npy_complex128);
1401
+ break;
1402
+ default:
1403
+ {
1404
+ PyGILState_STATE st = PyGILState_Ensure();
1405
+ PyErr_SetString(PyExc_ValueError,\
1406
+ "Invalid kind in numba_ez_rgesdd");
1407
+ PyGILState_Release(st);
1408
+ }
1409
+ return STATUS_ERROR;
1410
+ }
1411
+
1412
+ work = &stack_slot;
1413
+
1414
+ /* Compute optimal work size (lwork) */
1415
+ numba_raw_cgesdd(kind, jobz, m, n, a, lda, s, u ,ldu, vt, ldvt, work, lwork,
1416
+ rwork, iwork, &info);
1417
+ CATCH_LAPACK_INVALID_ARG("numba_raw_cgesdd", info);
1418
+
1419
+ /* Allocate work array */
1420
+ lwork = cast_from_X(kind, work);
1421
+ if (checked_PyMem_RawMalloc(&work, complex_base_size * lwork))
1422
+ return STATUS_ERROR;
1423
+
1424
+ minmn = m > n ? n : m;
1425
+ if (jobz == 'n')
1426
+ {
1427
+ lrwork = 7 * minmn;
1428
+ }
1429
+ else
1430
+ {
1431
+ maxmn = m > n ? m : n;
1432
+ tmp1 = 5 * minmn + 7;
1433
+ tmp2 = 2 * maxmn + 2 * minmn + 1;
1434
+ lrwork = minmn * (tmp1 > tmp2 ? tmp1: tmp2);
1435
+ }
1436
+
1437
+ if (checked_PyMem_RawMalloc(&rwork,
1438
+ real_base_size * (lrwork > 1 ? lrwork : 1)))
1439
+ {
1440
+ PyMem_RawFree(work);
1441
+ return STATUS_ERROR;
1442
+ }
1443
+ if (checked_PyMem_RawMalloc((void **) &iwork,
1444
+ 8 * minmn * sizeof(F_INT)))
1445
+ {
1446
+ PyMem_RawFree(work);
1447
+ PyMem_RawFree(rwork);
1448
+ return STATUS_ERROR;
1449
+ }
1450
+ numba_raw_cgesdd(kind, jobz, m, n, a, lda, s, u ,ldu, vt, ldvt, work, lwork,
1451
+ rwork, iwork, &info);
1452
+ PyMem_RawFree(work);
1453
+ PyMem_RawFree(rwork);
1454
+ PyMem_RawFree(iwork);
1455
+ CATCH_LAPACK_INVALID_ARG("numba_raw_cgesdd", info);
1456
+
1457
+ return (int)info;
1458
+ }
1459
+
1460
+
1461
+ /* SVD systems info from *gesdd.
1462
+ * This routine hides the type and general complexity involved with making the
1463
+ * calls to *gesdd. The work space computation and error handling etc is hidden.
1464
+ * Args are as per LAPACK.
1465
+ */
1466
+ NUMBA_EXPORT_FUNC(int)
1467
+ numba_ez_gesdd(char kind, char jobz, Py_ssize_t m, Py_ssize_t n, void *a,
1468
+ Py_ssize_t lda, void *s, void *u, Py_ssize_t ldu, void *vt,
1469
+ Py_ssize_t ldvt)
1470
+ {
1471
+ ENSURE_VALID_KIND(kind)
1472
+
1473
+ switch (kind)
1474
+ {
1475
+ case 's':
1476
+ case 'd':
1477
+ return numba_ez_rgesdd(kind, jobz, m, n, a, lda, s, u, ldu, vt,
1478
+ ldvt);
1479
+ case 'c':
1480
+ case 'z':
1481
+ return numba_ez_cgesdd(kind, jobz, m, n, a, lda, s, u, ldu, vt,
1482
+ ldvt);
1483
+ }
1484
+ return STATUS_ERROR; /* unreachable */
1485
+ }
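A hypothetical caller sketch (an annotation, not part of the diff; demo_svd_2x2 is an illustrative name): a full SVD of a small double matrix through numba_ez_gesdd, with jobz='A' requesting all of U and V^T. The caller supplies s (length 2), u (4 doubles) and vt (4 doubles).

/* SVD of diag(3, 1); the singular values land in s as {3, 1}. */
static int demo_svd_2x2(double *s, double *u, double *vt)
{
    double a[4] = {3.0, 0.0, 0.0, 1.0};   /* column-major 2x2 */
    return numba_ez_gesdd('d', 'A', 2, 2, a, 2, s, u, 2, vt, 2);
}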
1486
+
1487
+
1488
+ /*
1489
+ * Compute the QR factorization of a matrix.
1490
+ * Return -1 on internal error, 0 on success, > 0 on failure.
1491
+ */
1492
+ static int
1493
+ numba_raw_xgeqrf(char kind, Py_ssize_t m, Py_ssize_t n, void *a, Py_ssize_t
1494
+ lda, void *tau, void *work, Py_ssize_t lwork, F_INT *info)
1495
+ {
1496
+ void *raw_func = NULL;
1497
+ F_INT _m, _n, _lda, _lwork;
1498
+
1499
+ ENSURE_VALID_KIND(kind)
1500
+
1501
+ switch (kind)
1502
+ {
1503
+ case 's':
1504
+ raw_func = get_clapack_sgeqrf();
1505
+ break;
1506
+ case 'd':
1507
+ raw_func = get_clapack_dgeqrf();
1508
+ break;
1509
+ case 'c':
1510
+ raw_func = get_clapack_cgeqrf();
1511
+ break;
1512
+ case 'z':
1513
+ raw_func = get_clapack_zgeqrf();
1514
+ break;
1515
+ }
1516
+ ENSURE_VALID_FUNC(raw_func)
1517
+
1518
+ _m = (F_INT) m;
1519
+ _n = (F_INT) n;
1520
+ _lda = (F_INT) lda;
1521
+ _lwork = (F_INT) lwork;
1522
+
1523
+ (*(xgeqrf_t) raw_func)(&_m, &_n, a, &_lda, tau, work, &_lwork, info);
1524
+ return 0;
1525
+ }
1526
+
1527
+ /*
1528
+ * Compute the QR factorization of a matrix.
1529
+ * This routine hides the type and general complexity involved with making the
1530
+ * xgeqrf calls. The work space computation and error handling etc is hidden.
1531
+ * Args are as per LAPACK.
1532
+ */
1533
+ NUMBA_EXPORT_FUNC(int)
1534
+ numba_ez_geqrf(char kind, Py_ssize_t m, Py_ssize_t n, void *a, Py_ssize_t
1535
+ lda, void *tau)
1536
+ {
1537
+ F_INT info = 0;
1538
+ Py_ssize_t lwork = -1;
1539
+ size_t base_size = -1;
1540
+ all_dtypes stack_slot;
1541
+ void *work = NULL;
1542
+
1543
+ base_size = kind_size(kind);
1544
+
1545
+ work = &stack_slot;
1546
+
1547
+ /* Compute optimal work size (lwork) */
1548
+ numba_raw_xgeqrf(kind, m, n, a, lda, tau, work, lwork, &info);
1549
+ CATCH_LAPACK_INVALID_ARG("numba_raw_xgeqrf", info);
1550
+
1551
+ /* Allocate work array */
1552
+ lwork = cast_from_X(kind, work);
1553
+ if (checked_PyMem_RawMalloc(&work, base_size * lwork))
1554
+ return STATUS_ERROR;
1555
+
1556
+ numba_raw_xgeqrf(kind, m, n, a, lda, tau, work, lwork, &info);
1557
+ PyMem_RawFree(work);
1558
+ CATCH_LAPACK_INVALID_ARG("numba_raw_xgeqrf", info);
1559
+
1560
+ return 0; /* info cannot be >0 */
1561
+
1562
+ }
1563
+
1564
+
1565
+ /*
1566
+ * Compute the orthogonal Q matrix (in QR) from elementary reflectors.
1567
+ */
1568
+ static int
1569
+ numba_raw_xxxgqr(char kind, Py_ssize_t m, Py_ssize_t n, Py_ssize_t k, void *a,
1570
+ Py_ssize_t lda, void *tau, void * work, Py_ssize_t lwork, F_INT *info)
1571
+ {
1572
+ void *raw_func = NULL;
1573
+ F_INT _m, _n, _k, _lda, _lwork;
1574
+
1575
+ ENSURE_VALID_KIND(kind)
1576
+
1577
+ switch (kind)
1578
+ {
1579
+ case 's':
1580
+ raw_func = get_clapack_sorgqr();
1581
+ break;
1582
+ case 'd':
1583
+ raw_func = get_clapack_dorgqr();
1584
+ break;
1585
+ case 'c':
1586
+ raw_func = get_clapack_cungqr();
1587
+ break;
1588
+ case 'z':
1589
+ raw_func = get_clapack_zungqr();
1590
+ break;
1591
+ }
1592
+ ENSURE_VALID_FUNC(raw_func)
1593
+
1594
+ _m = (F_INT) m;
1595
+ _n = (F_INT) n;
1596
+ _k = (F_INT) k;
1597
+ _lda = (F_INT) lda;
1598
+ _lwork = (F_INT) lwork;
1599
+
1600
+ (*(xxxgqr_t) raw_func)(&_m, &_n, &_k, a, &_lda, tau, work, &_lwork, info);
1601
+ return 0;
1602
+ }
1603
+
1604
+
1605
+ /*
1606
+ * Compute the orthogonal Q matrix (in QR) from elementary reflectors.
1607
+ * This routine hides the type and general complexity involved with making the
1608
+ * x{or,un}qrf calls. The work space computation and error handling etc is
1609
+ * hidden. Args are as per LAPACK.
1610
+ */
1611
+ NUMBA_EXPORT_FUNC(int)
1612
+ numba_ez_xxgqr(char kind, Py_ssize_t m, Py_ssize_t n, Py_ssize_t k, void *a,
1613
+ Py_ssize_t lda, void *tau)
1614
+ {
1615
+ F_INT info = 0;
1616
+ Py_ssize_t lwork = -1;
1617
+ size_t base_size = -1;
1618
+ all_dtypes stack_slot;
1619
+ void *work = NULL;
1620
+
1621
+ work = &stack_slot;
1622
+
1623
+ /* Compute optimal work size (lwork) */
1624
+ numba_raw_xxxgqr(kind, m, n, k, a, lda, tau, work, lwork, &info);
1625
+ CATCH_LAPACK_INVALID_ARG("numba_raw_xxxgqr", info);
1626
+
1627
+ base_size = kind_size(kind);
1628
+
1629
+ /* Allocate work array */
1630
+ lwork = cast_from_X(kind, work);
1631
+ if (checked_PyMem_RawMalloc(&work, base_size * lwork))
1632
+ return STATUS_ERROR;
1633
+
1634
+ numba_raw_xxxgqr(kind, m, n, k, a, lda, tau, work, lwork, &info);
1635
+ PyMem_RawFree(work);
1636
+ CATCH_LAPACK_INVALID_ARG("numba_raw_xxxgqr", info);
1637
+
1638
+ return 0; /* info cannot be >0 */
1639
+
1640
+ }
1641
+
1642
+
1643
+ /*
1644
+ * Compute the minimum-norm solution to a real linear least squares problem.
1645
+ */
1646
+ static int
1647
+ numba_raw_rgelsd(char kind, Py_ssize_t m, Py_ssize_t n, Py_ssize_t nrhs,
1648
+ void *a, Py_ssize_t lda, void *b, Py_ssize_t ldb, void *S,
1649
+ void * rcond, Py_ssize_t * rank, void * work,
1650
+ Py_ssize_t lwork, F_INT *iwork, F_INT *info)
1651
+ {
1652
+ void *raw_func = NULL;
1653
+ F_INT _m, _n, _nrhs, _lda, _ldb, _rank, _lwork;
1654
+
1655
+ ENSURE_VALID_REAL_KIND(kind)
1656
+
1657
+ switch (kind)
1658
+ {
1659
+ case 's':
1660
+ raw_func = get_clapack_sgelsd();
1661
+ break;
1662
+ case 'd':
1663
+ raw_func = get_clapack_dgelsd();
1664
+ break;
1665
+ }
1666
+ ENSURE_VALID_FUNC(raw_func)
1667
+
1668
+ _m = (F_INT) m;
1669
+ _n = (F_INT) n;
1670
+ _nrhs = (F_INT) nrhs;
1671
+ _lda = (F_INT) lda;
1672
+ _ldb = (F_INT) ldb;
1673
+ _lwork = (F_INT) lwork;
1674
+
1675
+ (*(rgelsd_t) raw_func)(&_m, &_n, &_nrhs, a, &_lda, b, &_ldb, S, rcond,
1676
+ &_rank, work, &_lwork, iwork, info);
1677
+ *rank = (Py_ssize_t) _rank;
1678
+ return 0;
1679
+ }
1680
+
1681
+ /*
1682
+ * Compute the minimum-norm solution to a real linear least squares problem.
1683
+ * This routine hides the type and general complexity involved with making the
1684
+ * {s,d}gelsd calls. The work space computation and error handling etc is
1685
+ * hidden. Args are as per LAPACK.
1686
+ */
1687
+ static int
1688
+ numba_ez_rgelsd(char kind, Py_ssize_t m, Py_ssize_t n, Py_ssize_t nrhs,
1689
+ void *a, Py_ssize_t lda, void *b, Py_ssize_t ldb, void *S,
1690
+ double rcond, Py_ssize_t * rank)
1691
+ {
1692
+ F_INT info = 0;
1693
+ Py_ssize_t lwork = -1;
1694
+ size_t base_size = -1;
1695
+ all_dtypes stack_slot;
1696
+ void *work = NULL, *rcond_cast = NULL;
1697
+ F_INT *iwork = NULL;
1698
+ F_INT iwork_tmp;
1699
+ float tmpf;
1700
+
1701
+ ENSURE_VALID_REAL_KIND(kind)
1702
+
1703
+ base_size = kind_size(kind);
1704
+
1705
+ work = &stack_slot;
1706
+ rcond_cast = work; /* stop checks on null ptr complaining */
1707
+
1708
+ /* Compute optimal work size (lwork) */
1709
+ numba_raw_rgelsd(kind, m, n, nrhs, a, lda, b, ldb, S, rcond_cast, rank,
1710
+ work, lwork, &iwork_tmp, &info);
1711
+ CATCH_LAPACK_INVALID_ARG("numba_raw_rgelsd", info);
1712
+
1713
+ /* Allocate work array */
1714
+ lwork = cast_from_X(kind, work);
1715
+ if (checked_PyMem_RawMalloc(&work, base_size * lwork))
1716
+ return STATUS_ERROR;
1717
+
1718
+ /* Allocate iwork array */
1719
+ if (checked_PyMem_RawMalloc((void **)&iwork, sizeof(F_INT) * iwork_tmp))
1720
+ {
1721
+ PyMem_RawFree(work);
1722
+ return STATUS_ERROR;
1723
+ }
1724
+
1725
+ /* cast rcond to the right type */
1726
+ switch (kind)
1727
+ {
1728
+ case 's':
1729
+ tmpf = (float)rcond;
1730
+ rcond_cast = (void * )&tmpf;
1731
+ break;
1732
+ case 'd':
1733
+ rcond_cast = (void * )&rcond;
1734
+ break;
1735
+ }
1736
+
1737
+ numba_raw_rgelsd(kind, m, n, nrhs, a, lda, b, ldb, S, rcond_cast, rank,
1738
+ work, lwork, iwork, &info);
1739
+ PyMem_RawFree(work);
1740
+ PyMem_RawFree(iwork);
1741
+ CATCH_LAPACK_INVALID_ARG("numba_raw_rgelsd", info);
1742
+
1743
+ return (int)info;
1744
+ }
1745
+
1746
+
1747
+ /*
1748
+ * Compute the minimum-norm solution to a complex linear least squares problem.
1749
+ */
1750
+ static int
1751
+ numba_raw_cgelsd(char kind, Py_ssize_t m, Py_ssize_t n, Py_ssize_t nrhs,
1752
+ void *a, Py_ssize_t lda, void *b, Py_ssize_t ldb, void *S,
1753
+ void *rcond, Py_ssize_t * rank, void * work,
1754
+ Py_ssize_t lwork, void * rwork, F_INT *iwork, F_INT *info)
1755
+ {
1756
+ void *raw_func = NULL;
1757
+ F_INT _m, _n, _nrhs, _lda, _ldb, _rank, _lwork;
1758
+
1759
+ ENSURE_VALID_COMPLEX_KIND(kind)
1760
+
1761
+ switch (kind)
1762
+ {
1763
+ case 'c':
1764
+ raw_func = get_clapack_cgelsd();
1765
+ break;
1766
+ case 'z':
1767
+ raw_func = get_clapack_zgelsd();
1768
+ break;
1769
+ }
1770
+ ENSURE_VALID_FUNC(raw_func)
1771
+
1772
+ _m = (F_INT) m;
1773
+ _n = (F_INT) n;
1774
+ _nrhs = (F_INT) nrhs;
1775
+ _lda = (F_INT) lda;
1776
+ _ldb = (F_INT) ldb;
1777
+ _lwork = (F_INT) lwork;
1778
+
1779
+ (*(cgelsd_t) raw_func)(&_m, &_n, &_nrhs, a, &_lda, b, &_ldb, S, rcond,
1780
+ &_rank, work, &_lwork, rwork, iwork, info);
1781
+ *rank = (Py_ssize_t) _rank;
1782
+ return 0;
1783
+ }
1784
+
1785
+
1786
+ /*
1787
+ * Compute the minimum-norm solution to a complex linear least squares problem.
1788
+ * This routine hides the type and general complexity involved with making the
1789
+ * {c,z}gelsd calls. The work space computation and error handling etc is
1790
+ * hidden. Args are as per LAPACK.
1791
+ */
1792
+ static int
1793
+ numba_ez_cgelsd(char kind, Py_ssize_t m, Py_ssize_t n, Py_ssize_t nrhs,
1794
+ void *a, Py_ssize_t lda, void *b, Py_ssize_t ldb, void *S,
1795
+ double rcond, Py_ssize_t * rank)
1796
+ {
1797
+ F_INT info = 0;
1798
+ Py_ssize_t lwork = -1;
1799
+ size_t base_size = -1;
1800
+ all_dtypes stack_slot1, stack_slot2;
1801
+ size_t real_base_size = 0;
1802
+ void *work = NULL, *rwork = NULL, *rcond_cast = NULL;
1803
+ Py_ssize_t lrwork;
1804
+ F_INT *iwork = NULL;
1805
+ F_INT iwork_tmp;
1806
+ char real_kind = '-';
1807
+ float tmpf;
1808
+
1809
+ ENSURE_VALID_COMPLEX_KIND(kind)
1810
+
1811
+ base_size = kind_size(kind);
1812
+
1813
+ work = &stack_slot1;
1814
+ rwork = &stack_slot2;
1815
+ rcond_cast = work; /* stop checks on null ptr complaining */
1816
+
1817
+ /* Compute optimal work size */
1818
+ numba_raw_cgelsd(kind, m, n, nrhs, a, lda, b, ldb, S, rcond_cast, rank,
1819
+ work, lwork, rwork, &iwork_tmp, &info);
1820
+ CATCH_LAPACK_INVALID_ARG("numba_raw_cgelsd", info);
1821
+
1822
+ /* Allocate work array */
1823
+ lwork = cast_from_X(kind, work);
1824
+ if (checked_PyMem_RawMalloc(&work, base_size * lwork))
1825
+ return STATUS_ERROR;
1826
+
1827
+ /* Allocate iwork array */
1828
+ if (checked_PyMem_RawMalloc((void **)&iwork, sizeof(F_INT) * iwork_tmp))
1829
+ {
1830
+ PyMem_RawFree(work);
1831
+ return STATUS_ERROR;
1832
+ }
1833
+
1834
+ switch (kind)
1835
+ {
1836
+ case 'c':
1837
+ real_kind = 's';
1838
+ tmpf = (float)rcond;
1839
+ rcond_cast = (void * )&tmpf;
1840
+ break;
1841
+ case 'z':
1842
+ real_kind = 'd';
1843
+ rcond_cast = (void * )&rcond;
1844
+ break;
1845
+ }
1846
+
1847
+ real_base_size = kind_size(real_kind);
1848
+
1849
+ lrwork = cast_from_X(real_kind, rwork);
1850
+ if (checked_PyMem_RawMalloc((void **)&rwork, real_base_size * lrwork))
1851
+ {
1852
+ PyMem_RawFree(work);
1853
+ PyMem_RawFree(iwork);
1854
+ return STATUS_ERROR;
1855
+ }
1856
+
1857
+ numba_raw_cgelsd(kind, m, n, nrhs, a, lda, b, ldb, S, rcond_cast, rank,
1858
+ work, lwork, rwork, iwork, &info);
1859
+ PyMem_RawFree(work);
1860
+ PyMem_RawFree(rwork);
1861
+ PyMem_RawFree(iwork);
1862
+ CATCH_LAPACK_INVALID_ARG("numba_raw_cgelsd", info);
1863
+
1864
+ return (int)info;
1865
+ }
1866
+
1867
+
1868
+ /*
1869
+ * Compute the minimum-norm solution to a linear least squares problem.
1870
+ * This routine hides the type and general complexity involved with making the
1871
+ * calls to *gelsd. The work space computation and error handling etc is hidden.
1872
+ * Args are as per LAPACK.
1873
+ */
1874
+ NUMBA_EXPORT_FUNC(int)
1875
+ numba_ez_gelsd(char kind, Py_ssize_t m, Py_ssize_t n, Py_ssize_t nrhs,
1876
+ void *a, Py_ssize_t lda, void *b, Py_ssize_t ldb, void *S,
1877
+ double rcond, Py_ssize_t * rank)
1878
+ {
1879
+ ENSURE_VALID_KIND(kind)
1880
+
1881
+ switch (kind)
1882
+ {
1883
+ case 's':
1884
+ case 'd':
1885
+ return numba_ez_rgelsd(kind, m, n, nrhs, a, lda, b, ldb, S, rcond,
1886
+ rank);
1887
+ case 'c':
1888
+ case 'z':
1889
+ return numba_ez_cgelsd(kind, m, n, nrhs, a, lda, b, ldb, S, rcond,
1890
+ rank);
1891
+ }
1892
+ return STATUS_ERROR; /* unreachable */
1893
+ }
1894
+
1895
+
1896
+ /*
1897
+ * Compute the solution to a system of linear equations
1898
+ */
1899
+ NUMBA_EXPORT_FUNC(int)
1900
+ numba_xgesv(char kind, Py_ssize_t n, Py_ssize_t nrhs, void *a, Py_ssize_t lda,
1901
+ F_INT *ipiv, void *b, Py_ssize_t ldb)
1902
+ {
1903
+ void *raw_func = NULL;
1904
+ F_INT _n, _nrhs, _lda, _ldb, info;
1905
+
1906
+ ENSURE_VALID_KIND(kind)
1907
+
1908
+ switch (kind)
1909
+ {
1910
+ case 's':
1911
+ raw_func = get_clapack_sgesv();
1912
+ break;
1913
+ case 'd':
1914
+ raw_func = get_clapack_dgesv();
1915
+ break;
1916
+ case 'c':
1917
+ raw_func = get_clapack_cgesv();
1918
+ break;
1919
+ case 'z':
1920
+ raw_func = get_clapack_zgesv();
1921
+ break;
1922
+ }
1923
+
1924
+ ENSURE_VALID_FUNC(raw_func)
1925
+
1926
+ _n = (F_INT) n;
1927
+ _nrhs = (F_INT) nrhs;
1928
+ _lda = (F_INT) lda;
1929
+ _ldb = (F_INT) ldb;
1930
+
1931
+ (*(xgesv_t) raw_func)(&_n, &_nrhs, a, &_lda, ipiv, b, &_ldb, &info);
1932
+ CATCH_LAPACK_INVALID_ARG("xgesv", info);
1933
+
1934
+ return (int)info;
1935
+ }
1936
+
1937
+ /* undef defines and macros */
1938
+ #undef STATUS_SUCCESS
1939
+ #undef STATUS_ERROR
1940
+ #undef ENSURE_VALID_KIND
1941
+ #undef ENSURE_VALID_REAL_KIND
1942
+ #undef ENSURE_VALID_COMPLEX_KIND
1943
+ #undef ENSURE_VALID_FUNC
1944
+ #undef F_INT
1945
+ #undef EMIT_GET_CLAPACK_FUNC
1946
+ #undef CATCH_LAPACK_INVALID_ARG
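
The *gelsd wrappers above follow LAPACK's standard two-phase workspace query: the routine is first called with lwork = -1 so that it only reports the optimal WORK (and minimal IWORK) sizes, the buffers are allocated, and the call is then repeated to actually solve. A hedged, standalone sketch of that same pattern against the plain Fortran dgelsd_ symbol (not part of this diff; the data and names are invented, and it assumes a 32-bit Fortran INTEGER, column-major storage and linking with -llapack):

    #include <stdio.h>
    #include <stdlib.h>

    extern void dgelsd_(const int *m, const int *n, const int *nrhs,
                        double *a, const int *lda, double *b, const int *ldb,
                        double *s, const double *rcond, int *rank,
                        double *work, const int *lwork, int *iwork, int *info);

    int main(void)
    {
        int m = 3, n = 2, nrhs = 1, lda = 3, ldb = 3, rank, info;
        double a[6] = {1, 1, 1,      /* column 0: intercept         */
                       1, 2, 3};     /* column 1: fit y = c0 + c1*x */
        double b[3] = {1.1, 1.9, 3.05};
        double s[2], rcond = -1.0;   /* -1: use machine precision   */

        /* Phase 1: lwork = -1 asks dgelsd for the required sizes. */
        double wkopt;
        int iwkopt, lwork = -1;
        dgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
                &wkopt, &lwork, &iwkopt, &info);

        /* Phase 2: allocate and solve (error checks omitted for brevity). */
        lwork = (int)wkopt;
        double *work = malloc(sizeof(double) * lwork);
        int *iwork = malloc(sizeof(int) * iwkopt);
        dgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
                work, &lwork, iwork, &info);

        printf("info=%d rank=%d coeffs=(%g, %g)\n", info, rank, b[0], b[1]);
        free(work);
        free(iwork);
        return info;
    }

On exit the first n entries of b hold the least-squares coefficients. The ez_ wrappers above perform the same dance, additionally dispatching on the kind character and routing rcond and rwork through the matching real type for the complex variants.
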
lib/python3.10/site-packages/numba/_numba_common.h ADDED
@@ -0,0 +1,43 @@
1
+ #ifndef NUMBA_COMMON_H_
2
+ #define NUMBA_COMMON_H_
3
+
4
+ /* __has_attribute() is a clang / gcc-5 macro */
5
+ #ifndef __has_attribute
6
+ # define __has_attribute(x) 0
7
+ #endif
8
+
9
+ /* This attribute marks symbols that can be shared across C objects
10
+ * but are not exposed outside of a shared library or executable.
11
+ * Note this is the default behaviour for global symbols under Windows.
12
+ */
13
+ #if defined(_MSC_VER)
14
+ #define VISIBILITY_HIDDEN
15
+ #define VISIBILITY_GLOBAL __declspec(dllexport)
16
+ #elif (__has_attribute(visibility) || (defined(__GNUC__) && __GNUC__ >= 4))
17
+ #define VISIBILITY_HIDDEN __attribute__ ((visibility("hidden")))
18
+ #define VISIBILITY_GLOBAL __attribute__ ((visibility("default")))
19
+ #else
20
+ #define VISIBILITY_HIDDEN
21
+ #define VISIBILITY_GLOBAL
22
+ #endif
23
+
24
+ /*
25
+ * Numba's version of the PyArray_DescrCheck macro from NumPy, use it as a
26
+ * direct replacement of NumPy's PyArray_DescrCheck to ensure binary
27
+ * compatibility.
28
+ *
29
+ * Details of why this is needed:
30
+ * NumPy 1.18 changed the definition of the PyArray_DescrCheck macro here:
31
+ * https://github.com/numpy/numpy/commit/6108b5d1e138d07e3c9f2a4e3b1933749ad0e698
32
+ * the result of this being that building against NumPy <1.18 would prevent
33
+ * Numba running against NumPy >= 1.20 as noted here:
34
+ * https://github.com/numba/numba/issues/6041#issuecomment-665132199
35
+ *
36
+ * This macro definition is copied from:
37
+ * https://github.com/numpy/numpy/commit/6108b5d1e138d07e3c9f2a4e3b1933749ad0e698#diff-ad2213da23136c5fc5883d9eb2d88666R26
38
+ *
39
+ * NOTE: This is the NumPy 1.18 and above version of the macro.
40
+ */
41
+ #define NUMBA_PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)
42
+
43
+ #endif /* NUMBA_COMMON_H_ */
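
The visibility macros above let Numba's C helpers be shared between its own object files without being exported from the resulting shared library (VISIBILITY_HIDDEN), while entry points that must remain reachable from outside get VISIBILITY_GLOBAL. A minimal hedged sketch of how they might be applied; the function names are invented for illustration and _numba_common.h is assumed to be on the include path:

    #include "_numba_common.h"

    /* Shared across C objects in the same library, but not exported. */
    VISIBILITY_HIDDEN int internal_helper(int x)
    {
        return x * 2;
    }

    /* Exported from the shared library / DLL. */
    VISIBILITY_GLOBAL int public_entry_point(int x)
    {
        return internal_helper(x) + 1;
    }
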
lib/python3.10/site-packages/numba/_pymodule.h ADDED
@@ -0,0 +1,35 @@
1
+ #ifndef NUMBA_PY_MODULE_H_
2
+ #define NUMBA_PY_MODULE_H_
3
+
4
+ #define PY_SSIZE_T_CLEAN
5
+
6
+ #include "Python.h"
7
+ #include "structmember.h"
8
+ #include "frameobject.h"
9
+
10
+ #define MOD_ERROR_VAL NULL
11
+ #define MOD_SUCCESS_VAL(val) val
12
+ #define MOD_INIT(name) PyMODINIT_FUNC PyInit_##name(void)
13
+ #define MOD_DEF(ob, name, doc, methods) { \
14
+ static struct PyModuleDef moduledef = { \
15
+ PyModuleDef_HEAD_INIT, name, doc, -1, methods, NULL, NULL, NULL, NULL }; \
16
+ ob = PyModule_Create(&moduledef); }
17
+ #define MOD_INIT_EXEC(name) PyInit_##name();
18
+
19
+ #define PyString_AsString PyUnicode_AsUTF8
20
+ #define PyString_Check PyUnicode_Check
21
+ #define PyString_FromFormat PyUnicode_FromFormat
22
+ #define PyString_FromString PyUnicode_FromString
23
+ #define PyString_InternFromString PyUnicode_InternFromString
24
+ #define PyInt_Type PyLong_Type
25
+ #define PyInt_Check PyLong_Check
26
+ #define PyInt_CheckExact PyLong_CheckExact
27
+ #define SetAttrStringFromVoidPointer(m, name) do { \
28
+ PyObject *tmp = PyLong_FromVoidPtr((void *) &name); \
29
+ PyObject_SetAttrString(m, #name, tmp); \
30
+ Py_DECREF(tmp); } while (0)
31
+
32
+
33
+ #define NB_SUPPORTED_PYTHON_MINOR ((PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12) || (PY_MINOR_VERSION == 13))
34
+
35
+ #endif /* NUMBA_PY_MODULE_H_ */
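
_pymodule.h hides the Python 3 module-creation boilerplate (PyModuleDef plus PyModule_Create) behind MOD_INIT, MOD_DEF, MOD_ERROR_VAL and MOD_SUCCESS_VAL, and aliases a handful of PyString_*/PyInt_* names to their PyUnicode_*/PyLong_* equivalents. A hedged sketch of a minimal extension module written against these macros; the module name "demo" and its single method are invented, and the header is assumed to be reachable:

    #include "_pymodule.h"

    static PyObject *
    demo_ping(PyObject *self, PyObject *args)
    {
        /* PyString_FromString is aliased to PyUnicode_FromString above. */
        return PyString_FromString("pong");
    }

    static PyMethodDef demo_methods[] = {
        { "ping", demo_ping, METH_NOARGS, "return the string 'pong'" },
        { NULL, NULL, 0, NULL }
    };

    MOD_INIT(demo)
    {
        PyObject *m;
        MOD_DEF(m, "demo", "demo module", demo_methods)
        if (m == NULL)
            return MOD_ERROR_VAL;
        return MOD_SUCCESS_VAL(m);
    }

MOD_INIT(demo) expands to PyMODINIT_FUNC PyInit_demo(void), and MOD_INIT_EXEC(demo) simply calls PyInit_demo(), so the same init code can also be invoked directly from C.
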
lib/python3.10/site-packages/numba/_random.c ADDED
@@ -0,0 +1,492 @@
1
+ /*
2
+ * PRNG support.
3
+ */
4
+
5
+ #ifdef _MSC_VER
6
+ #define HAVE_PTHREAD_ATFORK 0
7
+ #else
8
+ #define HAVE_PTHREAD_ATFORK 1
9
+ #include <pthread.h>
10
+ #endif
11
+
12
+
13
+ /* Magic Mersenne Twister constants */
14
+ #define MT_N 624
15
+ #define MT_M 397
16
+ #define MT_MATRIX_A 0x9908b0dfU
17
+ #define MT_UPPER_MASK 0x80000000U
18
+ #define MT_LOWER_MASK 0x7fffffffU
19
+
20
+ /*
21
+ * Note this structure is accessed in numba.targets.randomimpl;
22
+ * any changes here should be reflected there too.
23
+ */
24
+ typedef struct {
25
+ int index;
26
+ /* unsigned int is sufficient on modern machines as we only need 32 bits */
27
+ unsigned int mt[MT_N];
28
+ int has_gauss;
29
+ double gauss;
30
+ int is_initialized;
31
+ } rnd_state_t;
32
+
33
+ /* Some code portions below from CPython's _randommodule.c, some others
34
+ from Numpy's and Jean-Sebastien Roy's randomkit.c. */
35
+
36
+ NUMBA_EXPORT_FUNC(void)
37
+ numba_rnd_shuffle(rnd_state_t *state)
38
+ {
39
+ int i;
40
+ unsigned int y;
41
+
42
+ for (i = 0; i < MT_N - MT_M; i++) {
43
+ y = (state->mt[i] & MT_UPPER_MASK) | (state->mt[i+1] & MT_LOWER_MASK);
44
+ state->mt[i] = state->mt[i+MT_M] ^ (y >> 1) ^
45
+ (-(int) (y & 1) & MT_MATRIX_A);
46
+ }
47
+ for (; i < MT_N - 1; i++) {
48
+ y = (state->mt[i] & MT_UPPER_MASK) | (state->mt[i+1] & MT_LOWER_MASK);
49
+ state->mt[i] = state->mt[i+(MT_M-MT_N)] ^ (y >> 1) ^
50
+ (-(int) (y & 1) & MT_MATRIX_A);
51
+ }
52
+ y = (state->mt[MT_N - 1] & MT_UPPER_MASK) | (state->mt[0] & MT_LOWER_MASK);
53
+ state->mt[MT_N - 1] = state->mt[MT_M - 1] ^ (y >> 1) ^
54
+ (-(int) (y & 1) & MT_MATRIX_A);
55
+ }
56
+
57
+ /* Initialize mt[] with an integer seed */
58
+ NUMBA_EXPORT_FUNC(void)
59
+ numba_rnd_init(rnd_state_t *state, unsigned int seed)
60
+ {
61
+ unsigned int pos;
62
+ seed &= 0xffffffffU;
63
+
64
+ /* Knuth's PRNG as used in the Mersenne Twister reference implementation */
65
+ for (pos = 0; pos < MT_N; pos++) {
66
+ state->mt[pos] = seed;
67
+ seed = (1812433253U * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffU;
68
+ }
69
+ state->index = MT_N;
70
+ state->has_gauss = 0;
71
+ state->gauss = 0.0;
72
+ state->is_initialized = 1;
73
+ }
74
+
75
+ /* Perturb mt[] with a key array */
76
+ static void
77
+ rnd_init_by_array(rnd_state_t *state, unsigned int init_key[], size_t key_length)
78
+ {
79
+ size_t i, j, k;
80
+ unsigned int *mt = state->mt;
81
+
82
+ numba_rnd_init(state, 19650218U);
83
+ i = 1; j = 0;
84
+ k = (MT_N > key_length ? MT_N : key_length);
85
+ for (; k; k--) {
86
+ mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1664525U))
87
+ + init_key[j] + (unsigned int) j; /* non linear */
88
+ mt[i] &= 0xffffffffU;
89
+ i++; j++;
90
+ if (i >= MT_N) { mt[0] = mt[MT_N - 1]; i = 1; }
91
+ if (j >= key_length) j = 0;
92
+ }
93
+ for (k = MT_N - 1; k; k--) {
94
+ mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941U))
95
+ - (unsigned int) i; /* non linear */
96
+ mt[i] &= 0xffffffffU;
97
+ i++;
98
+ if (i >= MT_N) { mt[0] = mt[MT_N - 1]; i=1; }
99
+ }
100
+
101
+ mt[0] = 0x80000000U; /* MSB is 1; ensuring non-zero initial array */
102
+ state->index = MT_N;
103
+ state->has_gauss = 0;
104
+ state->gauss = 0.0;
105
+ state->is_initialized = 1;
106
+ }
107
+
108
+ /*
109
+ * Management of thread-local random state.
110
+ */
111
+
112
+ static int rnd_globally_initialized;
113
+
114
+ #ifdef _MSC_VER
115
+ #define THREAD_LOCAL(ty) __declspec(thread) ty
116
+ #else
117
+ /* Non-standard C99 extension that's understood by gcc and clang */
118
+ #define THREAD_LOCAL(ty) __thread ty
119
+ #endif
120
+
121
+ static THREAD_LOCAL(rnd_state_t) numba_py_random_state;
122
+ static THREAD_LOCAL(rnd_state_t) numba_np_random_state;
123
+ static THREAD_LOCAL(rnd_state_t) numba_internal_random_state;
124
+
125
+ /* Seed the state with random bytes */
126
+ static int
127
+ rnd_seed_with_bytes(rnd_state_t *state, Py_buffer *buf)
128
+ {
129
+ unsigned int *keys;
130
+ unsigned char *bytes;
131
+ size_t i, nkeys;
132
+
133
+ nkeys = buf->len / sizeof(unsigned int);
134
+ keys = (unsigned int *) PyMem_Malloc(nkeys * sizeof(unsigned int));
135
+ if (keys == NULL) {
136
+ PyBuffer_Release(buf);
137
+ return -1;
138
+ }
139
+ bytes = (unsigned char *) buf->buf;
140
+ /* Convert input bytes to int32 keys, without violating alignment
141
+ * constraints.
142
+ */
143
+ for (i = 0; i < nkeys; i++, bytes += 4) {
144
+ keys[i] =
145
+ ((unsigned int)bytes[3] << 24) +
146
+ ((unsigned int)bytes[2] << 16) +
147
+ ((unsigned int)bytes[1] << 8) +
148
+ ((unsigned int)bytes[0] << 0);
149
+ }
150
+ PyBuffer_Release(buf);
151
+ rnd_init_by_array(state, keys, nkeys);
152
+ PyMem_Free(keys);
153
+ return 0;
154
+ }
155
+
156
+ #if HAVE_PTHREAD_ATFORK
157
+ /* After a fork(), the child should reseed its random states.
158
+ * Since only the main thread survives in the child, it's enough to mark
159
+ * the current thread-local states as uninitialized.
160
+ */
161
+ static void
162
+ rnd_atfork_child(void)
163
+ {
164
+ numba_py_random_state.is_initialized = 0;
165
+ numba_np_random_state.is_initialized = 0;
166
+ numba_internal_random_state.is_initialized = 0;
167
+ }
168
+ #endif
169
+
170
+ /* Global initialization routine. It must be called as early as possible.
171
+ */
172
+ NUMBA_EXPORT_FUNC(void)
173
+ numba_rnd_ensure_global_init(void)
174
+ {
175
+ if (!rnd_globally_initialized) {
176
+ #if HAVE_PTHREAD_ATFORK
177
+ pthread_atfork(NULL, NULL, rnd_atfork_child);
178
+ #endif
179
+ numba_py_random_state.is_initialized = 0;
180
+ numba_np_random_state.is_initialized = 0;
181
+ numba_internal_random_state.is_initialized = 0;
182
+ rnd_globally_initialized = 1;
183
+ }
184
+ }
185
+
186
+ /* First-time init a random state */
187
+ static void
188
+ rnd_implicit_init(rnd_state_t *state)
189
+ {
190
+ /* Initialize with random bytes. The easiest way to get good-quality
191
+ * cross-platform random bytes is still to call os.urandom()
192
+ * using the Python interpreter...
193
+ */
194
+ PyObject *module, *bufobj;
195
+ Py_buffer buf;
196
+ PyGILState_STATE gilstate = PyGILState_Ensure();
197
+
198
+ module = PyImport_ImportModule("os");
199
+ if (module == NULL)
200
+ goto error;
201
+ /* Read as many bytes as necessary to get the full entropy
202
+ * exploitable by the MT generator.
203
+ */
204
+ bufobj = PyObject_CallMethod(module, "urandom", "i",
205
+ (int) (MT_N * sizeof(unsigned int)));
206
+ Py_DECREF(module);
207
+ if (bufobj == NULL)
208
+ goto error;
209
+ if (PyObject_GetBuffer(bufobj, &buf, PyBUF_SIMPLE))
210
+ goto error;
211
+ Py_DECREF(bufobj);
212
+ if (rnd_seed_with_bytes(state, &buf))
213
+ goto error;
214
+ /* state->is_initialized is set now */
215
+
216
+ PyGILState_Release(gilstate);
217
+ return;
218
+
219
+ error:
220
+ /* In normal conditions, os.urandom() and PyMem_Malloc() shouldn't fail,
221
+ * and we don't want the caller to deal with errors, so just bail out.
222
+ */
223
+ if (PyErr_Occurred())
224
+ PyErr_Print();
225
+ Py_FatalError(NULL);
226
+ }
227
+
228
+ /* Functions returning the thread-local random state pointer.
229
+ * The LLVM JIT doesn't support thread-local variables so we rely
230
+ * on the C compiler instead.
231
+ */
232
+
233
+ NUMBA_EXPORT_FUNC(rnd_state_t *)
234
+ numba_get_py_random_state(void)
235
+ {
236
+ rnd_state_t *state = &numba_py_random_state;
237
+ if (!state->is_initialized)
238
+ rnd_implicit_init(state);
239
+ return state;
240
+ }
241
+
242
+ NUMBA_EXPORT_FUNC(rnd_state_t *)
243
+ numba_get_np_random_state(void)
244
+ {
245
+ rnd_state_t *state = &numba_np_random_state;
246
+ if (!state->is_initialized)
247
+ rnd_implicit_init(state);
248
+ return state;
249
+ }
250
+
251
+ NUMBA_EXPORT_FUNC(rnd_state_t *)
252
+ numba_get_internal_random_state(void)
253
+ {
254
+ rnd_state_t *state = &numba_internal_random_state;
255
+ if (!state->is_initialized)
256
+ rnd_implicit_init(state);
257
+ return state;
258
+ }
259
+
260
+ /*
261
+ * Python-exposed helpers for state management and testing.
262
+ */
263
+ static int
264
+ rnd_state_converter(PyObject *obj, rnd_state_t **state)
265
+ {
266
+ *state = (rnd_state_t *) PyLong_AsVoidPtr(obj);
267
+ return (*state != NULL || !PyErr_Occurred());
268
+ }
269
+
270
+ NUMBA_EXPORT_FUNC(PyObject *)
271
+ _numba_rnd_get_py_state_ptr(PyObject *self)
272
+ {
273
+ return PyLong_FromVoidPtr(numba_get_py_random_state());
274
+ }
275
+
276
+ NUMBA_EXPORT_FUNC(PyObject *)
277
+ _numba_rnd_get_np_state_ptr(PyObject *self)
278
+ {
279
+ return PyLong_FromVoidPtr(numba_get_np_random_state());
280
+ }
281
+
282
+ NUMBA_EXPORT_FUNC(PyObject *)
283
+ _numba_rnd_shuffle(PyObject *self, PyObject *arg)
284
+ {
285
+ rnd_state_t *state;
286
+ if (!rnd_state_converter(arg, &state))
287
+ return NULL;
288
+ numba_rnd_shuffle(state);
289
+ Py_RETURN_NONE;
290
+ }
291
+
292
+ NUMBA_EXPORT_FUNC(PyObject *)
293
+ _numba_rnd_set_state(PyObject *self, PyObject *args)
294
+ {
295
+ int i, index;
296
+ rnd_state_t *state;
297
+ PyObject *tuplearg, *intlist;
298
+
299
+ if (!PyArg_ParseTuple(args, "O&O!:rnd_set_state",
300
+ rnd_state_converter, &state,
301
+ &PyTuple_Type, &tuplearg))
302
+ return NULL;
303
+ if (!PyArg_ParseTuple(tuplearg, "iO!", &index, &PyList_Type, &intlist))
304
+ return NULL;
305
+ if (PyList_GET_SIZE(intlist) != MT_N) {
306
+ PyErr_SetString(PyExc_ValueError, "list object has wrong size");
307
+ return NULL;
308
+ }
309
+ state->index = index;
310
+ for (i = 0; i < MT_N; i++) {
311
+ PyObject *v = PyList_GET_ITEM(intlist, i);
312
+ unsigned long x = PyLong_AsUnsignedLong(v);
313
+ if (x == (unsigned long) -1 && PyErr_Occurred())
314
+ return NULL;
315
+ state->mt[i] = (unsigned int) x;
316
+ }
317
+ state->has_gauss = 0;
318
+ state->gauss = 0.0;
319
+ state->is_initialized = 1;
320
+ Py_RETURN_NONE;
321
+ }
322
+
323
+ NUMBA_EXPORT_FUNC(PyObject *)
324
+ _numba_rnd_get_state(PyObject *self, PyObject *arg)
325
+ {
326
+ PyObject *intlist;
327
+ int i;
328
+ rnd_state_t *state;
329
+ if (!rnd_state_converter(arg, &state))
330
+ return NULL;
331
+
332
+ intlist = PyList_New(MT_N);
333
+ if (intlist == NULL)
334
+ return NULL;
335
+ for (i = 0; i < MT_N; i++) {
336
+ PyObject *v = PyLong_FromUnsignedLong(state->mt[i]);
337
+ if (v == NULL) {
338
+ Py_DECREF(intlist);
339
+ return NULL;
340
+ }
341
+ PyList_SET_ITEM(intlist, i, v);
342
+ }
343
+ return Py_BuildValue("iN", state->index, intlist);
344
+ }
345
+
346
+ NUMBA_EXPORT_FUNC(PyObject *)
347
+ _numba_rnd_seed(PyObject *self, PyObject *args)
348
+ {
349
+ unsigned int seed;
350
+ rnd_state_t *state;
351
+
352
+ if (!PyArg_ParseTuple(args, "O&I:rnd_seed",
353
+ rnd_state_converter, &state, &seed)) {
354
+ /* rnd_seed_*(bytes-like object) */
355
+ Py_buffer buf;
356
+
357
+ PyErr_Clear();
358
+ if (!PyArg_ParseTuple(args, "O&s*:rnd_seed",
359
+ rnd_state_converter, &state, &buf))
360
+ return NULL;
361
+
362
+ if (rnd_seed_with_bytes(state, &buf))
363
+ return NULL;
364
+ else
365
+ Py_RETURN_NONE;
366
+ }
367
+ else {
368
+ /* rnd_seed_*(int32) */
369
+ numba_rnd_init(state, seed);
370
+ Py_RETURN_NONE;
371
+ }
372
+ }
373
+
374
+ /*
375
+ * Random distribution helpers.
376
+ * Most code straight from Numpy's distributions.c.
377
+ */
378
+
379
+ #ifndef M_PI
380
+ #define M_PI 3.14159265358979323846264338328
381
+ #endif
382
+
383
+ NUMBA_EXPORT_FUNC(unsigned int)
384
+ get_next_int32(rnd_state_t *state)
385
+ {
386
+ unsigned int y;
387
+
388
+ if (state->index == MT_N) {
389
+ numba_rnd_shuffle(state);
390
+ state->index = 0;
391
+ }
392
+ y = state->mt[state->index++];
393
+ /* Tempering */
394
+ y ^= (y >> 11);
395
+ y ^= (y << 7) & 0x9d2c5680U;
396
+ y ^= (y << 15) & 0xefc60000U;
397
+ y ^= (y >> 18);
398
+ return y;
399
+ }
400
+
401
+ NUMBA_EXPORT_FUNC(double)
402
+ get_next_double(rnd_state_t *state)
403
+ {
404
+ double a = get_next_int32(state) >> 5;
405
+ double b = get_next_int32(state) >> 6;
406
+ return (a * 67108864.0 + b) / 9007199254740992.0;
407
+ }
408
+
409
+ NUMBA_EXPORT_FUNC(double)
410
+ loggam(double x)
411
+ {
412
+ double x0, x2, xp, gl, gl0;
413
+ long k, n;
414
+
415
+ static double a[10] = {8.333333333333333e-02,-2.777777777777778e-03,
416
+ 7.936507936507937e-04,-5.952380952380952e-04,
417
+ 8.417508417508418e-04,-1.917526917526918e-03,
418
+ 6.410256410256410e-03,-2.955065359477124e-02,
419
+ 1.796443723688307e-01,-1.39243221690590e+00};
420
+ x0 = x;
421
+ n = 0;
422
+ if ((x == 1.0) || (x == 2.0))
423
+ {
424
+ return 0.0;
425
+ }
426
+ else if (x <= 7.0)
427
+ {
428
+ n = (long)(7 - x);
429
+ x0 = x + n;
430
+ }
431
+ x2 = 1.0/(x0*x0);
432
+ xp = 2*M_PI;
433
+ gl0 = a[9];
434
+ for (k=8; k>=0; k--)
435
+ {
436
+ gl0 *= x2;
437
+ gl0 += a[k];
438
+ }
439
+ gl = gl0/x0 + 0.5*log(xp) + (x0-0.5)*log(x0) - x0;
440
+ if (x <= 7.0)
441
+ {
442
+ for (k=1; k<=n; k++)
443
+ {
444
+ gl -= log(x0-1.0);
445
+ x0 -= 1.0;
446
+ }
447
+ }
448
+ return gl;
449
+ }
450
+
451
+
452
+ NUMBA_EXPORT_FUNC(int64_t)
453
+ numba_poisson_ptrs(rnd_state_t *state, double lam)
454
+ {
455
+ /* This method is invoked only if the parameter lambda of this
456
+ * distribution is big enough ( >= 10 ). The algorithm used is
457
+ * described in "Hörmann, W. 1992. 'The Transformed Rejection
458
+ * Method for Generating Poisson Random Variables'."
459
+ * The implementation comes straight from Numpy.
460
+ */
461
+ int64_t k;
462
+ double U, V, slam, loglam, a, b, invalpha, vr, us;
463
+
464
+ slam = sqrt(lam);
465
+ loglam = log(lam);
466
+ b = 0.931 + 2.53*slam;
467
+ a = -0.059 + 0.02483*b;
468
+ invalpha = 1.1239 + 1.1328/(b-3.4);
469
+ vr = 0.9277 - 3.6224/(b-2);
470
+
471
+ while (1)
472
+ {
473
+ U = get_next_double(state) - 0.5;
474
+ V = get_next_double(state);
475
+ us = 0.5 - fabs(U);
476
+ k = (int64_t) floor((2*a/us + b)*U + lam + 0.43);
477
+ if ((us >= 0.07) && (V <= vr))
478
+ {
479
+ return k;
480
+ }
481
+ if ((k < 0) ||
482
+ ((us < 0.013) && (V > us)))
483
+ {
484
+ continue;
485
+ }
486
+ if ((log(V) + log(invalpha) - log(a/(us*us)+b)) <=
487
+ (-lam + (double) k*loglam - loggam((double) k+1)))
488
+ {
489
+ return k;
490
+ }
491
+ }
492
+ }
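
get_next_double() above turns two tempered 32-bit outputs into a uniform double in [0, 1) with the full 53 bits of mantissa precision: the top 27 bits of one draw and the top 26 bits of the next are combined as (a * 2^26 + b) / 2^53, where 67108864 = 2^26 and 9007199254740992 = 2^53. A hedged, standalone sketch of just that construction, with rand() standing in for the Mersenne Twister source purely for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Stand-in 32-bit source; the code above uses get_next_int32(). */
    static uint32_t next_u32(void)
    {
        return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
    }

    static double next_double(void)
    {
        uint32_t a = next_u32() >> 5;   /* keep the 27 high bits */
        uint32_t b = next_u32() >> 6;   /* keep the 26 high bits */
        /* a * 2^26 + b is a 53-bit integer; dividing by 2^53 maps it to [0, 1). */
        return (a * 67108864.0 + b) / 9007199254740992.0;
    }

    int main(void)
    {
        srand(12345);
        for (int i = 0; i < 3; i++)
            printf("%.17g\n", next_double());
        return 0;
    }
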
lib/python3.10/site-packages/numba/_typeof.h ADDED
@@ -0,0 +1,16 @@
1
+ #ifndef NUMBA_TYPEOF_H_
2
+ #define NUMBA_TYPEOF_H_
3
+
4
+ #ifdef __cplusplus
5
+ extern "C" {
6
+ #endif
7
+
8
+ extern PyObject *typeof_init(PyObject *self, PyObject *args);
9
+ extern int typeof_typecode(PyObject *dispatcher, PyObject *val);
10
+ extern PyObject *typeof_compute_fingerprint(PyObject *val);
11
+
12
+ #ifdef __cplusplus
13
+ }
14
+ #endif
15
+
16
+ #endif /* NUMBA_TYPEOF_H_ */