nnilayy committed on
Commit cee0631 · verified · 1 Parent(s): 298c742

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +1 -0
  2. lib/python3.10/site-packages/fugashi.libs/libmecab-eada4a80.so.2.0.0 +3 -0
  3. lib/python3.10/site-packages/nltk/ccg/__init__.py +34 -0
  4. lib/python3.10/site-packages/nltk/ccg/api.py +358 -0
  5. lib/python3.10/site-packages/nltk/ccg/chart.py +480 -0
  6. lib/python3.10/site-packages/nltk/ccg/combinator.py +339 -0
  7. lib/python3.10/site-packages/nltk/ccg/lexicon.py +338 -0
  8. lib/python3.10/site-packages/nltk/ccg/logic.py +60 -0
  9. lib/python3.10/site-packages/nltk/classify/__init__.py +101 -0
  10. lib/python3.10/site-packages/nltk/classify/api.py +195 -0
  11. lib/python3.10/site-packages/nltk/classify/decisiontree.py +349 -0
  12. lib/python3.10/site-packages/nltk/classify/maxent.py +1569 -0
  13. lib/python3.10/site-packages/nltk/classify/megam.py +184 -0
  14. lib/python3.10/site-packages/nltk/classify/naivebayes.py +260 -0
  15. lib/python3.10/site-packages/nltk/classify/textcat.py +197 -0
  16. lib/python3.10/site-packages/nltk/cluster/gaac.py +170 -0
  17. lib/python3.10/site-packages/nltk/cluster/kmeans.py +231 -0
  18. lib/python3.10/site-packages/nltk/cluster/util.py +300 -0
  19. lib/python3.10/site-packages/nltk/corpus/__init__.py +529 -0
  20. lib/python3.10/site-packages/nltk/corpus/europarl_raw.py +56 -0
  21. lib/python3.10/site-packages/nltk/corpus/reader/bnc.py +265 -0
  22. lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py +237 -0
  23. lib/python3.10/site-packages/nltk/corpus/reader/categorized_sents.py +168 -0
  24. lib/python3.10/site-packages/nltk/corpus/reader/chasen.py +158 -0
  25. lib/python3.10/site-packages/nltk/corpus/reader/chunked.py +273 -0
  26. lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py +309 -0
  27. lib/python3.10/site-packages/nltk/corpus/reader/dependency.py +115 -0
  28. lib/python3.10/site-packages/nltk/corpus/reader/framenet.py +0 -0
  29. lib/python3.10/site-packages/nltk/corpus/reader/knbc.py +188 -0
  30. lib/python3.10/site-packages/nltk/corpus/reader/lin.py +183 -0
  31. lib/python3.10/site-packages/nltk/corpus/reader/markdown.py +342 -0
  32. lib/python3.10/site-packages/nltk/corpus/reader/mte.py +397 -0
  33. lib/python3.10/site-packages/nltk/corpus/reader/nombank.py +466 -0
  34. lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py +125 -0
  35. lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py +95 -0
  36. lib/python3.10/site-packages/nltk/corpus/reader/plaintext.py +227 -0
  37. lib/python3.10/site-packages/nltk/corpus/reader/ppattach.py +95 -0
  38. lib/python3.10/site-packages/nltk/corpus/reader/rte.py +146 -0
  39. lib/python3.10/site-packages/nltk/corpus/reader/senseval.py +196 -0
  40. lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py +125 -0
  41. lib/python3.10/site-packages/nltk/corpus/reader/tagged.py +354 -0
  42. lib/python3.10/site-packages/nltk/corpus/reader/twitter.py +136 -0
  43. lib/python3.10/site-packages/nltk/corpus/reader/udhr.py +75 -0
  44. lib/python3.10/site-packages/nltk/corpus/reader/util.py +867 -0
  45. lib/python3.10/site-packages/nltk/corpus/reader/verbnet.py +629 -0
  46. lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py +166 -0
  47. lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py +2489 -0
  48. lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py +256 -0
  49. lib/python3.10/site-packages/nltk/corpus/util.py +154 -0
  50. lib/python3.10/site-packages/nltk/inference/resolution.py +759 -0
.gitattributes CHANGED
@@ -65,3 +65,4 @@ lib/python3.10/site-packages/google/_upb/_message.abi3.so filter=lfs diff=lfs me
65
  lib/python3.10/site-packages/propcache/_helpers_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
66
  lib/python3.10/site-packages/rpds/rpds.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
67
  lib/python3.10/site-packages/grpc/_cython/cygrpc.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
68
+ lib/python3.10/site-packages/fugashi.libs/libmecab-eada4a80.so.2.0.0 filter=lfs diff=lfs merge=lfs -text
lib/python3.10/site-packages/fugashi.libs/libmecab-eada4a80.so.2.0.0 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5718aced8687227b57811c14a9fd238526fc9d74e206a35e0438b1de4f7beb66
3
+ size 1014137
lib/python3.10/site-packages/nltk/ccg/__init__.py ADDED
@@ -0,0 +1,34 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Combinatory Categorial Grammar.
10
+
11
+ For more information see nltk/doc/contrib/ccg/ccg.pdf
12
+ """
13
+
14
+ from nltk.ccg.chart import CCGChart, CCGChartParser, CCGEdge, CCGLeafEdge
15
+ from nltk.ccg.combinator import (
16
+ BackwardApplication,
17
+ BackwardBx,
18
+ BackwardCombinator,
19
+ BackwardComposition,
20
+ BackwardSx,
21
+ BackwardT,
22
+ DirectedBinaryCombinator,
23
+ ForwardApplication,
24
+ ForwardCombinator,
25
+ ForwardComposition,
26
+ ForwardSubstitution,
27
+ ForwardT,
28
+ UndirectedBinaryCombinator,
29
+ UndirectedComposition,
30
+ UndirectedFunctionApplication,
31
+ UndirectedSubstitution,
32
+ UndirectedTypeRaise,
33
+ )
34
+ from nltk.ccg.lexicon import CCGLexicon
lib/python3.10/site-packages/nltk/ccg/api.py ADDED
@@ -0,0 +1,358 @@
1
+ # Natural Language Toolkit: CCG Categories
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from abc import ABCMeta, abstractmethod
9
+ from functools import total_ordering
10
+
11
+ from nltk.internals import raise_unorderable_types
12
+
13
+
14
+ @total_ordering
15
+ class AbstractCCGCategory(metaclass=ABCMeta):
16
+ """
17
+ Interface for categories in combinatory grammars.
18
+ """
19
+
20
+ @abstractmethod
21
+ def is_primitive(self):
22
+ """
23
+ Returns true if the category is primitive.
24
+ """
25
+
26
+ @abstractmethod
27
+ def is_function(self):
28
+ """
29
+ Returns true if the category is a function application.
30
+ """
31
+
32
+ @abstractmethod
33
+ def is_var(self):
34
+ """
35
+ Returns true if the category is a variable.
36
+ """
37
+
38
+ @abstractmethod
39
+ def substitute(self, substitutions):
40
+ """
41
+ Takes a set of (var, category) substitutions, and replaces every
42
+ occurrence of the variable with the corresponding category.
43
+ """
44
+
45
+ @abstractmethod
46
+ def can_unify(self, other):
47
+ """
48
+ Determines whether two categories can be unified.
49
+ - Returns None if they cannot be unified
50
+ - Returns a list of necessary substitutions if they can.
51
+ """
52
+
53
+ # Utility functions: comparison, strings and hashing.
54
+ @abstractmethod
55
+ def __str__(self):
56
+ pass
57
+
58
+ def __eq__(self, other):
59
+ return (
60
+ self.__class__ is other.__class__
61
+ and self._comparison_key == other._comparison_key
62
+ )
63
+
64
+ def __ne__(self, other):
65
+ return not self == other
66
+
67
+ def __lt__(self, other):
68
+ if not isinstance(other, AbstractCCGCategory):
69
+ raise_unorderable_types("<", self, other)
70
+ if self.__class__ is other.__class__:
71
+ return self._comparison_key < other._comparison_key
72
+ else:
73
+ return self.__class__.__name__ < other.__class__.__name__
74
+
75
+ def __hash__(self):
76
+ try:
77
+ return self._hash
78
+ except AttributeError:
79
+ self._hash = hash(self._comparison_key)
80
+ return self._hash
81
+
82
+
83
+ class CCGVar(AbstractCCGCategory):
84
+ """
85
+ Class representing a variable CCG category.
86
+ Used for conjunctions (and possibly type-raising, if implemented as a
87
+ unary rule).
88
+ """
89
+
90
+ _maxID = 0
91
+
92
+ def __init__(self, prim_only=False):
93
+ """Initialize a variable (selects a new identifier)
94
+
95
+ :param prim_only: a boolean that determines whether the variable is
96
+ restricted to primitives
97
+ :type prim_only: bool
98
+ """
99
+ self._id = self.new_id()
100
+ self._prim_only = prim_only
101
+ self._comparison_key = self._id
102
+
103
+ @classmethod
104
+ def new_id(cls):
105
+ """
106
+ A class method allowing generation of unique variable identifiers.
107
+ """
108
+ cls._maxID = cls._maxID + 1
109
+ return cls._maxID - 1
110
+
111
+ @classmethod
112
+ def reset_id(cls):
113
+ cls._maxID = 0
114
+
115
+ def is_primitive(self):
116
+ return False
117
+
118
+ def is_function(self):
119
+ return False
120
+
121
+ def is_var(self):
122
+ return True
123
+
124
+ def substitute(self, substitutions):
125
+ """If there is a substitution corresponding to this variable,
126
+ return the substituted category.
127
+ """
128
+ for (var, cat) in substitutions:
129
+ if var == self:
130
+ return cat
131
+ return self
132
+
133
+ def can_unify(self, other):
134
+ """If the variable can be replaced with other
135
+ a substitution is returned.
136
+ """
137
+ if other.is_primitive() or not self._prim_only:
138
+ return [(self, other)]
139
+ return None
140
+
141
+ def id(self):
142
+ return self._id
143
+
144
+ def __str__(self):
145
+ return "_var" + str(self._id)
146
+
147
+
148
+ @total_ordering
149
+ class Direction:
150
+ """
151
+ Class representing the direction of a function application.
152
+ Also contains maintains information as to which combinators
153
+ may be used with the category.
154
+ """
155
+
156
+ def __init__(self, dir, restrictions):
157
+ self._dir = dir
158
+ self._restrs = restrictions
159
+ self._comparison_key = (dir, tuple(restrictions))
160
+
161
+ # Testing the application direction
162
+ def is_forward(self):
163
+ return self._dir == "/"
164
+
165
+ def is_backward(self):
166
+ return self._dir == "\\"
167
+
168
+ def dir(self):
169
+ return self._dir
170
+
171
+ def restrs(self):
172
+ """A list of restrictions on the combinators.
173
+ '.' denotes that permuting operations are disallowed
174
+ ',' denotes that function composition is disallowed
175
+ '_' denotes that the direction has variable restrictions.
176
+ (This is redundant in the current implementation of type-raising)
177
+ """
178
+ return self._restrs
179
+
180
+ def is_variable(self):
181
+ return self._restrs == "_"
182
+
183
+ # Unification and substitution of variable directions.
184
+ # Used only if type-raising is implemented as a unary rule, as it
185
+ # must inherit restrictions from the argument category.
186
+ def can_unify(self, other):
187
+ if other.is_variable():
188
+ return [("_", self.restrs())]
189
+ elif self.is_variable():
190
+ return [("_", other.restrs())]
191
+ else:
192
+ if self.restrs() == other.restrs():
193
+ return []
194
+ return None
195
+
196
+ def substitute(self, subs):
197
+ if not self.is_variable():
198
+ return self
199
+
200
+ for (var, restrs) in subs:
201
+ if var == "_":
202
+ return Direction(self._dir, restrs)
203
+ return self
204
+
205
+ # Testing permitted combinators
206
+ def can_compose(self):
207
+ return "," not in self._restrs
208
+
209
+ def can_cross(self):
210
+ return "." not in self._restrs
211
+
212
+ def __eq__(self, other):
213
+ return (
214
+ self.__class__ is other.__class__
215
+ and self._comparison_key == other._comparison_key
216
+ )
217
+
218
+ def __ne__(self, other):
219
+ return not self == other
220
+
221
+ def __lt__(self, other):
222
+ if not isinstance(other, Direction):
223
+ raise_unorderable_types("<", self, other)
224
+ if self.__class__ is other.__class__:
225
+ return self._comparison_key < other._comparison_key
226
+ else:
227
+ return self.__class__.__name__ < other.__class__.__name__
228
+
229
+ def __hash__(self):
230
+ try:
231
+ return self._hash
232
+ except AttributeError:
233
+ self._hash = hash(self._comparison_key)
234
+ return self._hash
235
+
236
+ def __str__(self):
237
+ r_str = ""
238
+ for r in self._restrs:
239
+ r_str = r_str + "%s" % r
240
+ return f"{self._dir}{r_str}"
241
+
242
+ # The negation operator reverses the direction of the application
243
+ def __neg__(self):
244
+ if self._dir == "/":
245
+ return Direction("\\", self._restrs)
246
+ else:
247
+ return Direction("/", self._restrs)
248
+
249
+
250
+ class PrimitiveCategory(AbstractCCGCategory):
251
+ """
252
+ Class representing primitive categories.
253
+ Takes a string representation of the category, and a
254
+ list of strings specifying the morphological subcategories.
255
+ """
256
+
257
+ def __init__(self, categ, restrictions=[]):
258
+ self._categ = categ
259
+ self._restrs = restrictions
260
+ self._comparison_key = (categ, tuple(restrictions))
261
+
262
+ def is_primitive(self):
263
+ return True
264
+
265
+ def is_function(self):
266
+ return False
267
+
268
+ def is_var(self):
269
+ return False
270
+
271
+ def restrs(self):
272
+ return self._restrs
273
+
274
+ def categ(self):
275
+ return self._categ
276
+
277
+ # Substitution does nothing to a primitive category
278
+ def substitute(self, subs):
279
+ return self
280
+
281
+ # A primitive can be unified with a class of the same
282
+ # base category, given that the other category shares all
283
+ # of its subclasses, or with a variable.
284
+ def can_unify(self, other):
285
+ if not other.is_primitive():
286
+ return None
287
+ if other.is_var():
288
+ return [(other, self)]
289
+ if other.categ() == self.categ():
290
+ for restr in self._restrs:
291
+ if restr not in other.restrs():
292
+ return None
293
+ return []
294
+ return None
295
+
296
+ def __str__(self):
297
+ if self._restrs == []:
298
+ return "%s" % self._categ
299
+ restrictions = "[%s]" % ",".join(repr(r) for r in self._restrs)
300
+ return f"{self._categ}{restrictions}"
301
+
302
+
303
+ class FunctionalCategory(AbstractCCGCategory):
304
+ """
305
+ Class that represents a function application category.
306
+ Consists of argument and result categories, together with
307
+ an application direction.
308
+ """
309
+
310
+ def __init__(self, res, arg, dir):
311
+ self._res = res
312
+ self._arg = arg
313
+ self._dir = dir
314
+ self._comparison_key = (arg, dir, res)
315
+
316
+ def is_primitive(self):
317
+ return False
318
+
319
+ def is_function(self):
320
+ return True
321
+
322
+ def is_var(self):
323
+ return False
324
+
325
+ # Substitution returns the category consisting of the
326
+ # substitution applied to each of its constituents.
327
+ def substitute(self, subs):
328
+ sub_res = self._res.substitute(subs)
329
+ sub_dir = self._dir.substitute(subs)
330
+ sub_arg = self._arg.substitute(subs)
331
+ return FunctionalCategory(sub_res, sub_arg, self._dir)
332
+
333
+ # A function can unify with another function, so long as its
334
+ # constituents can unify, or with an unrestricted variable.
335
+ def can_unify(self, other):
336
+ if other.is_var():
337
+ return [(other, self)]
338
+ if other.is_function():
339
+ sa = self._res.can_unify(other.res())
340
+ sd = self._dir.can_unify(other.dir())
341
+ if sa is not None and sd is not None:
342
+ sb = self._arg.substitute(sa).can_unify(other.arg().substitute(sa))
343
+ if sb is not None:
344
+ return sa + sb
345
+ return None
346
+
347
+ # Constituent accessors
348
+ def arg(self):
349
+ return self._arg
350
+
351
+ def res(self):
352
+ return self._res
353
+
354
+ def dir(self):
355
+ return self._dir
356
+
357
+ def __str__(self):
358
+ return f"({self._res}{self._dir}{self._arg})"
lib/python3.10/site-packages/nltk/ccg/chart.py ADDED
@@ -0,0 +1,480 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ The lexicon is constructed by calling
10
+ ``lexicon.fromstring(<lexicon string>)``.
11
+
12
+ In order to construct a parser, you also need a rule set.
13
+ The standard English rules are provided in chart as
14
+ ``chart.DefaultRuleSet``.
15
+
16
+ The parser can then be constructed by calling, for example:
17
+ ``parser = chart.CCGChartParser(<lexicon>, <ruleset>)``
18
+
19
+ Parsing is then performed by running
20
+ ``parser.parse(<sentence>.split())``.
21
+
22
+ While this returns a list of trees, the default representation
23
+ of the produced trees is not very enlightening, particularly
24
+ given that it uses the same tree class as the CFG parsers.
25
+ It is probably better to call:
26
+ ``chart.printCCGDerivation(<parse tree extracted from list>)``
27
+ which should print a nice representation of the derivation.
28
+
29
+ This entire process is shown far more clearly in the demonstration:
30
+ python chart.py
31
+ """
32
+
33
+ import itertools
34
+
35
+ from nltk.ccg.combinator import *
36
+ from nltk.ccg.combinator import (
37
+ BackwardApplication,
38
+ BackwardBx,
39
+ BackwardComposition,
40
+ BackwardSx,
41
+ BackwardT,
42
+ ForwardApplication,
43
+ ForwardComposition,
44
+ ForwardSubstitution,
45
+ ForwardT,
46
+ )
47
+ from nltk.ccg.lexicon import Token, fromstring
48
+ from nltk.ccg.logic import *
49
+ from nltk.parse import ParserI
50
+ from nltk.parse.chart import AbstractChartRule, Chart, EdgeI
51
+ from nltk.sem.logic import *
52
+ from nltk.tree import Tree
53
+
54
+
55
+ # Based on the EdgeI class from NLTK.
56
+ # A number of the properties of the EdgeI interface don't
57
+ # transfer well to CCGs, however.
58
+ class CCGEdge(EdgeI):
59
+ def __init__(self, span, categ, rule):
60
+ self._span = span
61
+ self._categ = categ
62
+ self._rule = rule
63
+ self._comparison_key = (span, categ, rule)
64
+
65
+ # Accessors
66
+ def lhs(self):
67
+ return self._categ
68
+
69
+ def span(self):
70
+ return self._span
71
+
72
+ def start(self):
73
+ return self._span[0]
74
+
75
+ def end(self):
76
+ return self._span[1]
77
+
78
+ def length(self):
79
+ return self._span[1] - self._span[0]
80
+
81
+ def rhs(self):
82
+ return ()
83
+
84
+ def dot(self):
85
+ return 0
86
+
87
+ def is_complete(self):
88
+ return True
89
+
90
+ def is_incomplete(self):
91
+ return False
92
+
93
+ def nextsym(self):
94
+ return None
95
+
96
+ def categ(self):
97
+ return self._categ
98
+
99
+ def rule(self):
100
+ return self._rule
101
+
102
+
103
+ class CCGLeafEdge(EdgeI):
104
+ """
105
+ Class representing leaf edges in a CCG derivation.
106
+ """
107
+
108
+ def __init__(self, pos, token, leaf):
109
+ self._pos = pos
110
+ self._token = token
111
+ self._leaf = leaf
112
+ self._comparison_key = (pos, token.categ(), leaf)
113
+
114
+ # Accessors
115
+ def lhs(self):
116
+ return self._token.categ()
117
+
118
+ def span(self):
119
+ return (self._pos, self._pos + 1)
120
+
121
+ def start(self):
122
+ return self._pos
123
+
124
+ def end(self):
125
+ return self._pos + 1
126
+
127
+ def length(self):
128
+ return 1
129
+
130
+ def rhs(self):
131
+ return self._leaf
132
+
133
+ def dot(self):
134
+ return 0
135
+
136
+ def is_complete(self):
137
+ return True
138
+
139
+ def is_incomplete(self):
140
+ return False
141
+
142
+ def nextsym(self):
143
+ return None
144
+
145
+ def token(self):
146
+ return self._token
147
+
148
+ def categ(self):
149
+ return self._token.categ()
150
+
151
+ def leaf(self):
152
+ return self._leaf
153
+
154
+
155
+ class BinaryCombinatorRule(AbstractChartRule):
156
+ """
157
+ Class implementing application of a binary combinator to a chart.
158
+ Takes the directed combinator to apply.
159
+ """
160
+
161
+ NUMEDGES = 2
162
+
163
+ def __init__(self, combinator):
164
+ self._combinator = combinator
165
+
166
+ # Apply a combinator
167
+ def apply(self, chart, grammar, left_edge, right_edge):
168
+ # The left & right edges must be touching.
169
+ if not (left_edge.end() == right_edge.start()):
170
+ return
171
+
172
+ # Check if the two edges are permitted to combine.
173
+ # If so, generate the corresponding edge.
174
+ if self._combinator.can_combine(left_edge.categ(), right_edge.categ()):
175
+ for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
176
+ new_edge = CCGEdge(
177
+ span=(left_edge.start(), right_edge.end()),
178
+ categ=res,
179
+ rule=self._combinator,
180
+ )
181
+ if chart.insert(new_edge, (left_edge, right_edge)):
182
+ yield new_edge
183
+
184
+ # The representation of the combinator (for printing derivations)
185
+ def __str__(self):
186
+ return "%s" % self._combinator
187
+
188
+
189
+ # Type-raising must be handled slightly differently to the other rules, as the
190
+ # resulting rules only span a single edge, rather than both edges.
191
+
192
+
193
+ class ForwardTypeRaiseRule(AbstractChartRule):
194
+ """
195
+ Class for applying forward type raising
196
+ """
197
+
198
+ NUMEDGES = 2
199
+
200
+ def __init__(self):
201
+ self._combinator = ForwardT
202
+
203
+ def apply(self, chart, grammar, left_edge, right_edge):
204
+ if not (left_edge.end() == right_edge.start()):
205
+ return
206
+
207
+ for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
208
+ new_edge = CCGEdge(span=left_edge.span(), categ=res, rule=self._combinator)
209
+ if chart.insert(new_edge, (left_edge,)):
210
+ yield new_edge
211
+
212
+ def __str__(self):
213
+ return "%s" % self._combinator
214
+
215
+
216
+ class BackwardTypeRaiseRule(AbstractChartRule):
217
+ """
218
+ Class for applying backward type raising.
219
+ """
220
+
221
+ NUMEDGES = 2
222
+
223
+ def __init__(self):
224
+ self._combinator = BackwardT
225
+
226
+ def apply(self, chart, grammar, left_edge, right_edge):
227
+ if not (left_edge.end() == right_edge.start()):
228
+ return
229
+
230
+ for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
231
+ new_edge = CCGEdge(span=right_edge.span(), categ=res, rule=self._combinator)
232
+ if chart.insert(new_edge, (right_edge,)):
233
+ yield new_edge
234
+
235
+ def __str__(self):
236
+ return "%s" % self._combinator
237
+
238
+
239
+ # Common sets of combinators used for English derivations.
240
+ ApplicationRuleSet = [
241
+ BinaryCombinatorRule(ForwardApplication),
242
+ BinaryCombinatorRule(BackwardApplication),
243
+ ]
244
+ CompositionRuleSet = [
245
+ BinaryCombinatorRule(ForwardComposition),
246
+ BinaryCombinatorRule(BackwardComposition),
247
+ BinaryCombinatorRule(BackwardBx),
248
+ ]
249
+ SubstitutionRuleSet = [
250
+ BinaryCombinatorRule(ForwardSubstitution),
251
+ BinaryCombinatorRule(BackwardSx),
252
+ ]
253
+ TypeRaiseRuleSet = [ForwardTypeRaiseRule(), BackwardTypeRaiseRule()]
254
+
255
+ # The standard English rule set.
256
+ DefaultRuleSet = (
257
+ ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet + TypeRaiseRuleSet
258
+ )
259
+
260
+
261
+ class CCGChartParser(ParserI):
262
+ """
263
+ Chart parser for CCGs.
264
+ Based largely on the ChartParser class from NLTK.
265
+ """
266
+
267
+ def __init__(self, lexicon, rules, trace=0):
268
+ self._lexicon = lexicon
269
+ self._rules = rules
270
+ self._trace = trace
271
+
272
+ def lexicon(self):
273
+ return self._lexicon
274
+
275
+ # Implements the CYK algorithm
276
+ def parse(self, tokens):
277
+ tokens = list(tokens)
278
+ chart = CCGChart(list(tokens))
279
+ lex = self._lexicon
280
+
281
+ # Initialize leaf edges.
282
+ for index in range(chart.num_leaves()):
283
+ for token in lex.categories(chart.leaf(index)):
284
+ new_edge = CCGLeafEdge(index, token, chart.leaf(index))
285
+ chart.insert(new_edge, ())
286
+
287
+ # Select a span for the new edges
288
+ for span in range(2, chart.num_leaves() + 1):
289
+ for start in range(0, chart.num_leaves() - span + 1):
290
+ # Try all possible pairs of edges that could generate
291
+ # an edge for that span
292
+ for part in range(1, span):
293
+ lstart = start
294
+ mid = start + part
295
+ rend = start + span
296
+
297
+ for left in chart.select(span=(lstart, mid)):
298
+ for right in chart.select(span=(mid, rend)):
299
+ # Generate all possible combinations of the two edges
300
+ for rule in self._rules:
301
+ edges_added_by_rule = 0
302
+ for newedge in rule.apply(chart, lex, left, right):
303
+ edges_added_by_rule += 1
304
+
305
+ # Output the resulting parses
306
+ return chart.parses(lex.start())
307
+
308
+
309
+ class CCGChart(Chart):
310
+ def __init__(self, tokens):
311
+ Chart.__init__(self, tokens)
312
+
313
+ # Constructs the trees for a given parse. Unfortunately, the parse trees need to be
314
+ # constructed slightly differently to those in the default Chart class, so it has to
315
+ # be reimplemented
316
+ def _trees(self, edge, complete, memo, tree_class):
317
+ assert complete, "CCGChart cannot build incomplete trees"
318
+
319
+ if edge in memo:
320
+ return memo[edge]
321
+
322
+ if isinstance(edge, CCGLeafEdge):
323
+ word = tree_class(edge.token(), [self._tokens[edge.start()]])
324
+ leaf = tree_class((edge.token(), "Leaf"), [word])
325
+ memo[edge] = [leaf]
326
+ return [leaf]
327
+
328
+ memo[edge] = []
329
+ trees = []
330
+
331
+ for cpl in self.child_pointer_lists(edge):
332
+ child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl]
333
+ for children in itertools.product(*child_choices):
334
+ lhs = (
335
+ Token(
336
+ self._tokens[edge.start() : edge.end()],
337
+ edge.lhs(),
338
+ compute_semantics(children, edge),
339
+ ),
340
+ str(edge.rule()),
341
+ )
342
+ trees.append(tree_class(lhs, children))
343
+
344
+ memo[edge] = trees
345
+ return trees
346
+
347
+
348
+ def compute_semantics(children, edge):
349
+ if children[0].label()[0].semantics() is None:
350
+ return None
351
+
352
+ if len(children) == 2:
353
+ if isinstance(edge.rule(), BackwardCombinator):
354
+ children = [children[1], children[0]]
355
+
356
+ combinator = edge.rule()._combinator
357
+ function = children[0].label()[0].semantics()
358
+ argument = children[1].label()[0].semantics()
359
+
360
+ if isinstance(combinator, UndirectedFunctionApplication):
361
+ return compute_function_semantics(function, argument)
362
+ elif isinstance(combinator, UndirectedComposition):
363
+ return compute_composition_semantics(function, argument)
364
+ elif isinstance(combinator, UndirectedSubstitution):
365
+ return compute_substitution_semantics(function, argument)
366
+ else:
367
+ raise AssertionError("Unsupported combinator '" + combinator + "'")
368
+ else:
369
+ return compute_type_raised_semantics(children[0].label()[0].semantics())
370
+
371
+
372
+ # --------
373
+ # Displaying derivations
374
+ # --------
375
+ def printCCGDerivation(tree):
376
+ # Get the leaves and initial categories
377
+ leafcats = tree.pos()
378
+ leafstr = ""
379
+ catstr = ""
380
+
381
+ # Construct a string with both the leaf word and corresponding
382
+ # category aligned.
383
+ for (leaf, cat) in leafcats:
384
+ str_cat = "%s" % cat
385
+ nextlen = 2 + max(len(leaf), len(str_cat))
386
+ lcatlen = (nextlen - len(str_cat)) // 2
387
+ rcatlen = lcatlen + (nextlen - len(str_cat)) % 2
388
+ catstr += " " * lcatlen + str_cat + " " * rcatlen
389
+ lleaflen = (nextlen - len(leaf)) // 2
390
+ rleaflen = lleaflen + (nextlen - len(leaf)) % 2
391
+ leafstr += " " * lleaflen + leaf + " " * rleaflen
392
+ print(leafstr.rstrip())
393
+ print(catstr.rstrip())
394
+
395
+ # Display the derivation steps
396
+ printCCGTree(0, tree)
397
+
398
+
399
+ # Prints the sequence of derivation steps.
400
+ def printCCGTree(lwidth, tree):
401
+ rwidth = lwidth
402
+
403
+ # Is a leaf (word).
404
+ # Increment the span by the space occupied by the leaf.
405
+ if not isinstance(tree, Tree):
406
+ return 2 + lwidth + len(tree)
407
+
408
+ # Find the width of the current derivation step
409
+ for child in tree:
410
+ rwidth = max(rwidth, printCCGTree(rwidth, child))
411
+
412
+ # Is a leaf node.
413
+ # Don't print anything, but account for the space occupied.
414
+ if not isinstance(tree.label(), tuple):
415
+ return max(
416
+ rwidth, 2 + lwidth + len("%s" % tree.label()), 2 + lwidth + len(tree[0])
417
+ )
418
+
419
+ (token, op) = tree.label()
420
+
421
+ if op == "Leaf":
422
+ return rwidth
423
+
424
+ # Pad to the left with spaces, followed by a sequence of '-'
425
+ # and the derivation rule.
426
+ print(lwidth * " " + (rwidth - lwidth) * "-" + "%s" % op)
427
+ # Print the resulting category on a new line.
428
+ str_res = "%s" % (token.categ())
429
+ if token.semantics() is not None:
430
+ str_res += " {" + str(token.semantics()) + "}"
431
+ respadlen = (rwidth - lwidth - len(str_res)) // 2 + lwidth
432
+ print(respadlen * " " + str_res)
433
+ return rwidth
434
+
435
+
436
+ ### Demonstration code
437
+
438
+ # Construct the lexicon
439
+ lex = fromstring(
440
+ """
441
+ :- S, NP, N, VP # Primitive categories, S is the target primitive
442
+
443
+ Det :: NP/N # Family of words
444
+ Pro :: NP
445
+ TV :: VP/NP
446
+ Modal :: (S\\NP)/VP # Backslashes need to be escaped
447
+
448
+ I => Pro # Word -> Category mapping
449
+ you => Pro
450
+
451
+ the => Det
452
+
453
+ # Variables have the special keyword 'var'
454
+ # '.' prevents permutation
455
+ # ',' prevents composition
456
+ and => var\\.,var/.,var
457
+
458
+ which => (N\\N)/(S/NP)
459
+
460
+ will => Modal # Categories can be either explicit, or families.
461
+ might => Modal
462
+
463
+ cook => TV
464
+ eat => TV
465
+
466
+ mushrooms => N
467
+ parsnips => N
468
+ bacon => N
469
+ """
470
+ )
471
+
472
+
473
+ def demo():
474
+ parser = CCGChartParser(lex, DefaultRuleSet)
475
+ for parse in parser.parse("I might cook and eat the bacon".split()):
476
+ printCCGDerivation(parse)
477
+
478
+
479
+ if __name__ == "__main__":
480
+ demo()
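
Note (reviewer sketch, not part of the diff): the module docstring and demo above already describe the workflow; the snippet below ties it together end to end. All names come from the chart.py and lexicon.py files in this commit, while the toy grammar string is illustrative, modeled on the demo lexicon.

from nltk.ccg import chart, lexicon

lex = lexicon.fromstring(
    """
    :- S, NP, N, VP        # primitive categories, S is the target
    Det :: NP/N
    Pro :: NP
    TV :: VP/NP
    Modal :: (S\\NP)/VP
    I => Pro
    the => Det
    will => Modal
    cook => TV
    bacon => N
    """
)

parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
for parse in parser.parse("I will cook the bacon".split()):
    chart.printCCGDerivation(parse)
    break  # show only the first derivation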
lib/python3.10/site-packages/nltk/ccg/combinator.py ADDED
@@ -0,0 +1,339 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ """
8
+ CCG Combinators
9
+ """
10
+
11
+ from abc import ABCMeta, abstractmethod
12
+
13
+ from nltk.ccg.api import FunctionalCategory
14
+
15
+
16
+ class UndirectedBinaryCombinator(metaclass=ABCMeta):
17
+ """
18
+ Abstract class for representing a binary combinator.
19
+ Merely defines functions for checking if the function and argument
20
+ are able to be combined, and what the resulting category is.
21
+
22
+ Note that as no assumptions are made as to direction, the unrestricted
23
+ combinators can perform all backward, forward and crossed variations
24
+ of the combinators; these restrictions must be added in the rule
25
+ class.
26
+ """
27
+
28
+ @abstractmethod
29
+ def can_combine(self, function, argument):
30
+ pass
31
+
32
+ @abstractmethod
33
+ def combine(self, function, argument):
34
+ pass
35
+
36
+
37
+ class DirectedBinaryCombinator(metaclass=ABCMeta):
38
+ """
39
+ Wrapper for the undirected binary combinator.
40
+ It takes left and right categories, and decides which is to be
41
+ the function, and which the argument.
42
+ It then decides whether or not they can be combined.
43
+ """
44
+
45
+ @abstractmethod
46
+ def can_combine(self, left, right):
47
+ pass
48
+
49
+ @abstractmethod
50
+ def combine(self, left, right):
51
+ pass
52
+
53
+
54
+ class ForwardCombinator(DirectedBinaryCombinator):
55
+ """
56
+ Class representing combinators where the primary functor is on the left.
57
+
58
+ Takes an undirected combinator, and a predicate which adds constraints
59
+ restricting the cases in which it may apply.
60
+ """
61
+
62
+ def __init__(self, combinator, predicate, suffix=""):
63
+ self._combinator = combinator
64
+ self._predicate = predicate
65
+ self._suffix = suffix
66
+
67
+ def can_combine(self, left, right):
68
+ return self._combinator.can_combine(left, right) and self._predicate(
69
+ left, right
70
+ )
71
+
72
+ def combine(self, left, right):
73
+ yield from self._combinator.combine(left, right)
74
+
75
+ def __str__(self):
76
+ return f">{self._combinator}{self._suffix}"
77
+
78
+
79
+ class BackwardCombinator(DirectedBinaryCombinator):
80
+ """
81
+ The backward equivalent of the ForwardCombinator class.
82
+ """
83
+
84
+ def __init__(self, combinator, predicate, suffix=""):
85
+ self._combinator = combinator
86
+ self._predicate = predicate
87
+ self._suffix = suffix
88
+
89
+ def can_combine(self, left, right):
90
+ return self._combinator.can_combine(right, left) and self._predicate(
91
+ left, right
92
+ )
93
+
94
+ def combine(self, left, right):
95
+ yield from self._combinator.combine(right, left)
96
+
97
+ def __str__(self):
98
+ return f"<{self._combinator}{self._suffix}"
99
+
100
+
101
+ class UndirectedFunctionApplication(UndirectedBinaryCombinator):
102
+ """
103
+ Class representing function application.
104
+ Implements rules of the form:
105
+ X/Y Y -> X (>)
106
+ And the corresponding backwards application rule
107
+ """
108
+
109
+ def can_combine(self, function, argument):
110
+ if not function.is_function():
111
+ return False
112
+
113
+ return not function.arg().can_unify(argument) is None
114
+
115
+ def combine(self, function, argument):
116
+ if not function.is_function():
117
+ return
118
+
119
+ subs = function.arg().can_unify(argument)
120
+ if subs is None:
121
+ return
122
+
123
+ yield function.res().substitute(subs)
124
+
125
+ def __str__(self):
126
+ return ""
127
+
128
+
129
+ # Predicates for function application.
130
+
131
+ # Ensures the left functor takes an argument on the right
132
+ def forwardOnly(left, right):
133
+ return left.dir().is_forward()
134
+
135
+
136
+ # Ensures the right functor takes an argument on the left
137
+ def backwardOnly(left, right):
138
+ return right.dir().is_backward()
139
+
140
+
141
+ # Application combinator instances
142
+ ForwardApplication = ForwardCombinator(UndirectedFunctionApplication(), forwardOnly)
143
+ BackwardApplication = BackwardCombinator(UndirectedFunctionApplication(), backwardOnly)
144
+
145
+
146
+ class UndirectedComposition(UndirectedBinaryCombinator):
147
+ """
148
+ Functional composition (harmonic) combinator.
149
+ Implements rules of the form
150
+ X/Y Y/Z -> X/Z (B>)
151
+ And the corresponding backwards and crossed variations.
152
+ """
153
+
154
+ def can_combine(self, function, argument):
155
+ # Can only combine two functions, and both functions must
156
+ # allow composition.
157
+ if not (function.is_function() and argument.is_function()):
158
+ return False
159
+ if function.dir().can_compose() and argument.dir().can_compose():
160
+ return not function.arg().can_unify(argument.res()) is None
161
+ return False
162
+
163
+ def combine(self, function, argument):
164
+ if not (function.is_function() and argument.is_function()):
165
+ return
166
+ if function.dir().can_compose() and argument.dir().can_compose():
167
+ subs = function.arg().can_unify(argument.res())
168
+ if subs is not None:
169
+ yield FunctionalCategory(
170
+ function.res().substitute(subs),
171
+ argument.arg().substitute(subs),
172
+ argument.dir(),
173
+ )
174
+
175
+ def __str__(self):
176
+ return "B"
177
+
178
+
179
+ # Predicates for restricting application of straight composition.
180
+ def bothForward(left, right):
181
+ return left.dir().is_forward() and right.dir().is_forward()
182
+
183
+
184
+ def bothBackward(left, right):
185
+ return left.dir().is_backward() and right.dir().is_backward()
186
+
187
+
188
+ # Predicates for crossed composition
189
+ def crossedDirs(left, right):
190
+ return left.dir().is_forward() and right.dir().is_backward()
191
+
192
+
193
+ def backwardBxConstraint(left, right):
194
+ # The functors must be crossed inwards
195
+ if not crossedDirs(left, right):
196
+ return False
197
+ # Permuting combinators must be allowed
198
+ if not (left.dir().can_cross() and right.dir().can_cross()):
199
+ return False
200
+ # The resulting argument category is restricted to be primitive
201
+ return left.arg().is_primitive()
202
+
203
+
204
+ # Straight composition combinators
205
+ ForwardComposition = ForwardCombinator(UndirectedComposition(), forwardOnly)
206
+ BackwardComposition = BackwardCombinator(UndirectedComposition(), backwardOnly)
207
+
208
+ # Backward crossed composition
209
+ BackwardBx = BackwardCombinator(
210
+ UndirectedComposition(), backwardBxConstraint, suffix="x"
211
+ )
212
+
213
+
214
+ class UndirectedSubstitution(UndirectedBinaryCombinator):
215
+ r"""
216
+ Substitution (permutation) combinator.
217
+ Implements rules of the form
218
+ Y/Z (X\Y)/Z -> X/Z (<Sx)
219
+ And other variations.
220
+ """
221
+
222
+ def can_combine(self, function, argument):
223
+ if function.is_primitive() or argument.is_primitive():
224
+ return False
225
+
226
+ # These could potentially be moved to the predicates, as the
227
+ # constraints may not be general to all languages.
228
+ if function.res().is_primitive():
229
+ return False
230
+ if not function.arg().is_primitive():
231
+ return False
232
+
233
+ if not (function.dir().can_compose() and argument.dir().can_compose()):
234
+ return False
235
+ return (function.res().arg() == argument.res()) and (
236
+ function.arg() == argument.arg()
237
+ )
238
+
239
+ def combine(self, function, argument):
240
+ if self.can_combine(function, argument):
241
+ yield FunctionalCategory(
242
+ function.res().res(), argument.arg(), argument.dir()
243
+ )
244
+
245
+ def __str__(self):
246
+ return "S"
247
+
248
+
249
+ # Predicate for forward substitution
250
+ def forwardSConstraint(left, right):
251
+ if not bothForward(left, right):
252
+ return False
253
+ return left.res().dir().is_forward() and left.arg().is_primitive()
254
+
255
+
256
+ # Predicate for backward crossed substitution
257
+ def backwardSxConstraint(left, right):
258
+ if not (left.dir().can_cross() and right.dir().can_cross()):
259
+ return False
260
+ if not bothForward(left, right):
261
+ return False
262
+ return right.res().dir().is_backward() and right.arg().is_primitive()
263
+
264
+
265
+ # Instances of substitution combinators
266
+ ForwardSubstitution = ForwardCombinator(UndirectedSubstitution(), forwardSConstraint)
267
+ BackwardSx = BackwardCombinator(UndirectedSubstitution(), backwardSxConstraint, "x")
268
+
269
+
270
+ # Retrieves the left-most functional category.
271
+ # ie, (N\N)/(S/NP) => N\N
272
+ def innermostFunction(categ):
273
+ while categ.res().is_function():
274
+ categ = categ.res()
275
+ return categ
276
+
277
+
278
+ class UndirectedTypeRaise(UndirectedBinaryCombinator):
279
+ """
280
+ Undirected combinator for type raising.
281
+ """
282
+
283
+ def can_combine(self, function, arg):
284
+ # The argument must be a function.
285
+ # The restriction that arg.res() must be a function
286
+ # merely reduces redundant type-raising; if arg.res() is
287
+ # primitive, we have:
288
+ # X Y\X =>(<T) Y/(Y\X) Y\X =>(>) Y
289
+ # which is equivalent to
290
+ # X Y\X =>(<) Y
291
+ if not (arg.is_function() and arg.res().is_function()):
292
+ return False
293
+
294
+ arg = innermostFunction(arg)
295
+
296
+ # Check whether the function unifies with the argument of the innermost application.
297
+ subs = function.can_unify(arg.arg())
298
+ if subs is not None:
299
+ return True
300
+ return False
301
+
302
+ def combine(self, function, arg):
303
+ if not (
304
+ function.is_primitive() and arg.is_function() and arg.res().is_function()
305
+ ):
306
+ return
307
+
308
+ # Type-raising matches only the innermost application.
309
+ arg = innermostFunction(arg)
310
+
311
+ subs = function.can_unify(arg.arg())
312
+ if subs is not None:
313
+ xcat = arg.res().substitute(subs)
314
+ yield FunctionalCategory(
315
+ xcat, FunctionalCategory(xcat, function, arg.dir()), -(arg.dir())
316
+ )
317
+
318
+ def __str__(self):
319
+ return "T"
320
+
321
+
322
+ # Predicates for type-raising
323
+ # The direction of the innermost category must be towards
324
+ # the primary functor.
325
+ # The restriction that the variable must be primitive is not
326
+ # common to all versions of CCGs; some authors have other restrictions.
327
+ def forwardTConstraint(left, right):
328
+ arg = innermostFunction(right)
329
+ return arg.dir().is_backward() and arg.res().is_primitive()
330
+
331
+
332
+ def backwardTConstraint(left, right):
333
+ arg = innermostFunction(left)
334
+ return arg.dir().is_forward() and arg.res().is_primitive()
335
+
336
+
337
+ # Instances of type-raising combinators
338
+ ForwardT = ForwardCombinator(UndirectedTypeRaise(), forwardTConstraint)
339
+ BackwardT = BackwardCombinator(UndirectedTypeRaise(), backwardTConstraint)
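
Note (reviewer sketch, not part of the diff): the combinator instances defined above can be exercised directly on categories built with nltk.ccg.api; the categories below are illustrative.

from nltk.ccg.api import Direction, FunctionalCategory, PrimitiveCategory
from nltk.ccg.combinator import ForwardApplication, ForwardComposition

s, np, n = PrimitiveCategory("S"), PrimitiveCategory("NP"), PrimitiveCategory("N")
fwd = Direction("/", [])
det = FunctionalCategory(np, n, fwd)   # NP/N
verb = FunctionalCategory(s, np, fwd)  # S/NP

# Forward application:  NP/N  N  ->  NP
print(ForwardApplication.can_combine(det, n))                   # True
print([str(c) for c in ForwardApplication.combine(det, n)])     # ['NP']

# Forward composition:  S/NP  NP/N  ->  S/N
print([str(c) for c in ForwardComposition.combine(verb, det)])  # ['(S/N)']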
lib/python3.10/site-packages/nltk/ccg/lexicon.py ADDED
@@ -0,0 +1,338 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ """
8
+ CCG Lexicons
9
+ """
10
+
11
+ import re
12
+ from collections import defaultdict
13
+
14
+ from nltk.ccg.api import CCGVar, Direction, FunctionalCategory, PrimitiveCategory
15
+ from nltk.internals import deprecated
16
+ from nltk.sem.logic import Expression
17
+
18
+ # ------------
19
+ # Regular expressions used for parsing components of the lexicon
20
+ # ------------
21
+
22
+ # Parses a primitive category and subscripts
23
+ PRIM_RE = re.compile(r"""([A-Za-z]+)(\[[A-Za-z,]+\])?""")
24
+
25
+ # Separates the next primitive category from the remainder of the
26
+ # string
27
+ NEXTPRIM_RE = re.compile(r"""([A-Za-z]+(?:\[[A-Za-z,]+\])?)(.*)""")
28
+
29
+ # Separates the next application operator from the remainder
30
+ APP_RE = re.compile(r"""([\\/])([.,]?)([.,]?)(.*)""")
31
+
32
+ # Parses the definition of the right-hand side (rhs) of either a word or a family
33
+ LEX_RE = re.compile(r"""([\S_]+)\s*(::|[-=]+>)\s*(.+)""", re.UNICODE)
34
+
35
+ # Parses the right hand side that contains category and maybe semantic predicate
36
+ RHS_RE = re.compile(r"""([^{}]*[^ {}])\s*(\{[^}]+\})?""", re.UNICODE)
37
+
38
+ # Parses the semantic predicate
39
+ SEMANTICS_RE = re.compile(r"""\{([^}]+)\}""", re.UNICODE)
40
+
41
+ # Strips comments from a line
42
+ COMMENTS_RE = re.compile("""([^#]*)(?:#.*)?""")
43
+
44
+
45
+ class Token:
46
+ """
47
+ Class representing a token.
48
+
49
+ token => category {semantics}
50
+ e.g. eat => S\\var[pl]/var {\\x y.eat(x,y)}
51
+
52
+ * `token` (string)
53
+ * `categ` (string)
54
+ * `semantics` (Expression)
55
+ """
56
+
57
+ def __init__(self, token, categ, semantics=None):
58
+ self._token = token
59
+ self._categ = categ
60
+ self._semantics = semantics
61
+
62
+ def categ(self):
63
+ return self._categ
64
+
65
+ def semantics(self):
66
+ return self._semantics
67
+
68
+ def __str__(self):
69
+ semantics_str = ""
70
+ if self._semantics is not None:
71
+ semantics_str = " {" + str(self._semantics) + "}"
72
+ return "" + str(self._categ) + semantics_str
73
+
74
+ def __cmp__(self, other):
75
+ if not isinstance(other, Token):
76
+ return -1
77
+ return cmp((self._categ, self._semantics), (other.categ(), other.semantics()))
78
+
79
+
80
+ class CCGLexicon:
81
+ """
82
+ Class representing a lexicon for CCG grammars.
83
+
84
+ * `primitives`: The list of primitive categories for the lexicon
85
+ * `families`: Families of categories
86
+ * `entries`: A mapping of words to possible categories
87
+ """
88
+
89
+ def __init__(self, start, primitives, families, entries):
90
+ self._start = PrimitiveCategory(start)
91
+ self._primitives = primitives
92
+ self._families = families
93
+ self._entries = entries
94
+
95
+ def categories(self, word):
96
+ """
97
+ Returns all the possible categories for a word
98
+ """
99
+ return self._entries[word]
100
+
101
+ def start(self):
102
+ """
103
+ Return the target category for the parser
104
+ """
105
+ return self._start
106
+
107
+ def __str__(self):
108
+ """
109
+ String representation of the lexicon. Used for debugging.
110
+ """
111
+ string = ""
112
+ first = True
113
+ for ident in sorted(self._entries):
114
+ if not first:
115
+ string = string + "\n"
116
+ string = string + ident + " => "
117
+
118
+ first = True
119
+ for cat in self._entries[ident]:
120
+ if not first:
121
+ string = string + " | "
122
+ else:
123
+ first = False
124
+ string = string + "%s" % cat
125
+ return string
126
+
127
+
128
+ # -----------
129
+ # Parsing lexicons
130
+ # -----------
131
+
132
+
133
+ def matchBrackets(string):
134
+ """
135
+ Separate the contents matching the first set of brackets from the rest of
136
+ the input.
137
+ """
138
+ rest = string[1:]
139
+ inside = "("
140
+
141
+ while rest != "" and not rest.startswith(")"):
142
+ if rest.startswith("("):
143
+ (part, rest) = matchBrackets(rest)
144
+ inside = inside + part
145
+ else:
146
+ inside = inside + rest[0]
147
+ rest = rest[1:]
148
+ if rest.startswith(")"):
149
+ return (inside + ")", rest[1:])
150
+ raise AssertionError("Unmatched bracket in string '" + string + "'")
151
+
152
+
153
+ def nextCategory(string):
154
+ """
155
+ Separate the string for the next portion of the category from the rest
156
+ of the string
157
+ """
158
+ if string.startswith("("):
159
+ return matchBrackets(string)
160
+ return NEXTPRIM_RE.match(string).groups()
161
+
162
+
163
+ def parseApplication(app):
164
+ """
165
+ Parse an application operator
166
+ """
167
+ return Direction(app[0], app[1:])
168
+
169
+
170
+ def parseSubscripts(subscr):
171
+ """
172
+ Parse the subscripts for a primitive category
173
+ """
174
+ if subscr:
175
+ return subscr[1:-1].split(",")
176
+ return []
177
+
178
+
179
+ def parsePrimitiveCategory(chunks, primitives, families, var):
180
+ """
181
+ Parse a primitive category
182
+
183
+ If the primitive is the special category 'var', replace it with the
184
+ correct `CCGVar`.
185
+ """
186
+ if chunks[0] == "var":
187
+ if chunks[1] is None:
188
+ if var is None:
189
+ var = CCGVar()
190
+ return (var, var)
191
+
192
+ catstr = chunks[0]
193
+ if catstr in families:
194
+ (cat, cvar) = families[catstr]
195
+ if var is None:
196
+ var = cvar
197
+ else:
198
+ cat = cat.substitute([(cvar, var)])
199
+ return (cat, var)
200
+
201
+ if catstr in primitives:
202
+ subscrs = parseSubscripts(chunks[1])
203
+ return (PrimitiveCategory(catstr, subscrs), var)
204
+ raise AssertionError(
205
+ "String '" + catstr + "' is neither a family nor primitive category."
206
+ )
207
+
208
+
209
+ def augParseCategory(line, primitives, families, var=None):
210
+ """
211
+ Parse a string representing a category, and returns a tuple with
212
+ (possibly) the CCG variable for the category
213
+ """
214
+ (cat_string, rest) = nextCategory(line)
215
+
216
+ if cat_string.startswith("("):
217
+ (res, var) = augParseCategory(cat_string[1:-1], primitives, families, var)
218
+
219
+ else:
220
+ (res, var) = parsePrimitiveCategory(
221
+ PRIM_RE.match(cat_string).groups(), primitives, families, var
222
+ )
223
+
224
+ while rest != "":
225
+ app = APP_RE.match(rest).groups()
226
+ direction = parseApplication(app[0:3])
227
+ rest = app[3]
228
+
229
+ (cat_string, rest) = nextCategory(rest)
230
+ if cat_string.startswith("("):
231
+ (arg, var) = augParseCategory(cat_string[1:-1], primitives, families, var)
232
+ else:
233
+ (arg, var) = parsePrimitiveCategory(
234
+ PRIM_RE.match(cat_string).groups(), primitives, families, var
235
+ )
236
+ res = FunctionalCategory(res, arg, direction)
237
+
238
+ return (res, var)
239
+
240
+
241
+ def fromstring(lex_str, include_semantics=False):
242
+ """
243
+ Convert string representation into a lexicon for CCGs.
244
+ """
245
+ CCGVar.reset_id()
246
+ primitives = []
247
+ families = {}
248
+ entries = defaultdict(list)
249
+ for line in lex_str.splitlines():
250
+ # Strip comments and leading/trailing whitespace.
251
+ line = COMMENTS_RE.match(line).groups()[0].strip()
252
+ if line == "":
253
+ continue
254
+
255
+ if line.startswith(":-"):
256
+ # A line of primitive categories.
257
+ # The first one is the target category
258
+ # ie, :- S, N, NP, VP
259
+ primitives = primitives + [
260
+ prim.strip() for prim in line[2:].strip().split(",")
261
+ ]
262
+ else:
263
+ # Either a family definition, or a word definition
264
+ (ident, sep, rhs) = LEX_RE.match(line).groups()
265
+ (catstr, semantics_str) = RHS_RE.match(rhs).groups()
266
+ (cat, var) = augParseCategory(catstr, primitives, families)
267
+
268
+ if sep == "::":
269
+ # Family definition
270
+ # ie, Det :: NP/N
271
+ families[ident] = (cat, var)
272
+ else:
273
+ semantics = None
274
+ if include_semantics is True:
275
+ if semantics_str is None:
276
+ raise AssertionError(
277
+ line
278
+ + " must contain semantics because include_semantics is set to True"
279
+ )
280
+ else:
281
+ semantics = Expression.fromstring(
282
+ SEMANTICS_RE.match(semantics_str).groups()[0]
283
+ )
284
+ # Word definition
285
+ # ie, which => (N\N)/(S/NP)
286
+ entries[ident].append(Token(ident, cat, semantics))
287
+ return CCGLexicon(primitives[0], primitives, families, entries)
288
+
289
+
290
+ @deprecated("Use fromstring() instead.")
291
+ def parseLexicon(lex_str):
292
+ return fromstring(lex_str)
293
+
294
+
295
+ openccg_tinytiny = fromstring(
296
+ """
297
+ # Rather minimal lexicon based on the openccg `tinytiny' grammar.
298
+ # Only incorporates a subset of the morphological subcategories, however.
299
+ :- S,NP,N # Primitive categories
300
+ Det :: NP/N # Determiners
301
+ Pro :: NP
302
+ IntransVsg :: S\\NP[sg] # Tensed intransitive verbs (singular)
303
+ IntransVpl :: S\\NP[pl] # Plural
304
+ TransVsg :: S\\NP[sg]/NP # Tensed transitive verbs (singular)
305
+ TransVpl :: S\\NP[pl]/NP # Plural
306
+
307
+ the => NP[sg]/N[sg]
308
+ the => NP[pl]/N[pl]
309
+
310
+ I => Pro
311
+ me => Pro
312
+ we => Pro
313
+ us => Pro
314
+
315
+ book => N[sg]
316
+ books => N[pl]
317
+
318
+ peach => N[sg]
319
+ peaches => N[pl]
320
+
321
+ policeman => N[sg]
322
+ policemen => N[pl]
323
+
324
+ boy => N[sg]
325
+ boys => N[pl]
326
+
327
+ sleep => IntransVsg
328
+ sleep => IntransVpl
329
+
330
+ eat => IntransVpl
331
+ eat => TransVpl
332
+ eats => IntransVsg
333
+ eats => TransVsg
334
+
335
+ see => TransVpl
336
+ sees => TransVsg
337
+ """
338
+ )
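
Note (reviewer sketch, not part of the diff): fromstring() above accepts the same grammar notation used in openccg_tinytiny. The tiny lexicon below is illustrative; every function and method comes from the file shown above.

from nltk.ccg import lexicon

lex = lexicon.fromstring(
    """
    :- S, NP, N      # primitive categories; S is the target
    Det :: NP/N      # a category family
    the => Det
    dog => N
    barks => S\\NP
    """
)

print(lex.start())                # S
for tok in lex.categories("the"):
    print(tok.categ())            # (NP/N)
print(lex)                        # the full word => category listing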
lib/python3.10/site-packages/nltk/ccg/logic.py ADDED
@@ -0,0 +1,60 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Tanin Na Nakorn (@tanin)
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ """
8
+ Helper functions for CCG semantics computation
9
+ """
10
+
11
+ from nltk.sem.logic import *
12
+
13
+
14
+ def compute_type_raised_semantics(semantics):
15
+ core = semantics
16
+ parent = None
17
+ while isinstance(core, LambdaExpression):
18
+ parent = core
19
+ core = core.term
20
+
21
+ var = Variable("F")
22
+ while var in core.free():
23
+ var = unique_variable(pattern=var)
24
+ core = ApplicationExpression(FunctionVariableExpression(var), core)
25
+
26
+ if parent is not None:
27
+ parent.term = core
28
+ else:
29
+ semantics = core
30
+
31
+ return LambdaExpression(var, semantics)
32
+
33
+
34
+ def compute_function_semantics(function, argument):
35
+ return ApplicationExpression(function, argument).simplify()
36
+
37
+
38
+ def compute_composition_semantics(function, argument):
39
+ assert isinstance(argument, LambdaExpression), (
40
+ "`" + str(argument) + "` must be a lambda expression"
41
+ )
42
+ return LambdaExpression(
43
+ argument.variable, ApplicationExpression(function, argument.term).simplify()
44
+ )
45
+
46
+
47
+ def compute_substitution_semantics(function, argument):
48
+ assert isinstance(function, LambdaExpression) and isinstance(
49
+ function.term, LambdaExpression
50
+ ), ("`" + str(function) + "` must be a lambda expression with 2 arguments")
51
+ assert isinstance(argument, LambdaExpression), (
52
+ "`" + str(argument) + "` must be a lambda expression"
53
+ )
54
+
55
+ new_argument = ApplicationExpression(
56
+ argument, VariableExpression(function.variable)
57
+ ).simplify()
58
+ new_term = ApplicationExpression(function.term, new_argument).simplify()
59
+
60
+ return LambdaExpression(function.variable, new_term)
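
Note (reviewer sketch, not part of the diff): the semantics helpers above operate on nltk.sem.logic expressions; the lambda terms below are illustrative.

from nltk.ccg.logic import compute_composition_semantics, compute_function_semantics
from nltk.sem.logic import Expression

f = Expression.fromstring(r"\x.eat(x)")
a = Expression.fromstring("bacon")

# Function application: (\x.eat(x))(bacon) simplifies to eat(bacon)
print(compute_function_semantics(f, a))        # eat(bacon)

# Composition: combine \x.eat(x) with \y.the(y) into \y.eat(the(y))
g = Expression.fromstring(r"\y.the(y)")
print(compute_composition_semantics(f, g))     # \y.eat(the(y))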
lib/python3.10/site-packages/nltk/classify/__init__.py ADDED
@@ -0,0 +1,101 @@
1
+ # Natural Language Toolkit: Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Classes and interfaces for labeling tokens with category labels (or
10
+ "class labels"). Typically, labels are represented with strings
11
+ (such as ``'health'`` or ``'sports'``). Classifiers can be used to
12
+ perform a wide range of classification tasks. For example,
13
+ classifiers can be used...
14
+
15
+ - to classify documents by topic
16
+ - to classify ambiguous words by which word sense is intended
17
+ - to classify acoustic signals by which phoneme they represent
18
+ - to classify sentences by their author
19
+
20
+ Features
21
+ ========
22
+ In order to decide which category label is appropriate for a given
23
+ token, classifiers examine one or more 'features' of the token. These
24
+ "features" are typically chosen by hand, and indicate which aspects
25
+ of the token are relevant to the classification decision. For
26
+ example, a document classifier might use a separate feature for each
27
+ word, recording how often that word occurred in the document.
28
+
29
+ Featuresets
30
+ ===========
31
+ The features describing a token are encoded using a "featureset",
32
+ which is a dictionary that maps from "feature names" to "feature
33
+ values". Feature names are unique strings that indicate what aspect
34
+ of the token is encoded by the feature. Examples include
35
+ ``'prevword'``, for a feature whose value is the previous word; and
36
+ ``'contains-word(library)'`` for a feature that is true when a document
37
+ contains the word ``'library'``. Feature values are typically
38
+ booleans, numbers, or strings, depending on which feature they
39
+ describe.
40
+
41
+ Featuresets are typically constructed using a "feature detector"
42
+ (also known as a "feature extractor"). A feature detector is a
43
+ function that takes a token (and sometimes information about its
44
+ context) as its input, and returns a featureset describing that token.
45
+ For example, the following feature detector converts a document
46
+ (stored as a list of words) to a featureset describing the set of
47
+ words included in the document:
48
+
49
+ >>> # Define a feature detector function.
50
+ >>> def document_features(document):
51
+ ... return dict([('contains-word(%s)' % w, True) for w in document])
52
+
53
+ Feature detectors are typically applied to each token before it is fed
54
+ to the classifier:
55
+
56
+ >>> # Classify each Gutenberg document.
57
+ >>> from nltk.corpus import gutenberg
58
+ >>> for fileid in gutenberg.fileids(): # doctest: +SKIP
59
+ ... doc = gutenberg.words(fileid) # doctest: +SKIP
60
+ ... print(fileid, classifier.classify(document_features(doc))) # doctest: +SKIP
61
+
62
+ The parameters that a feature detector expects will vary, depending on
63
+ the task and the needs of the feature detector. For example, a
64
+ feature detector for word sense disambiguation (WSD) might take as its
65
+ input a sentence, and the index of a word that should be classified,
66
+ and return a featureset for that word. The following feature detector
67
+ for WSD includes features describing the left and right contexts of
68
+ the target word:
69
+
70
+ >>> def wsd_features(sentence, index):
71
+ ... featureset = {}
72
+ ... for i in range(max(0, index-3), index):
73
+ ... featureset['left-context(%s)' % sentence[i]] = True
74
+ ... for i in range(index, min(index+3, len(sentence))):
75
+ ... featureset['right-context(%s)' % sentence[i]] = True
76
+ ... return featureset
77
+
78
+ Training Classifiers
79
+ ====================
80
+ Most classifiers are built by training them on a list of hand-labeled
81
+ examples, known as the "training set". Training sets are represented
82
+ as lists of ``(featuredict, label)`` tuples.
83
+ """
84
+
85
+ from nltk.classify.api import ClassifierI, MultiClassifierI
86
+ from nltk.classify.decisiontree import DecisionTreeClassifier
87
+ from nltk.classify.maxent import (
88
+ BinaryMaxentFeatureEncoding,
89
+ ConditionalExponentialClassifier,
90
+ MaxentClassifier,
91
+ TypedMaxentFeatureEncoding,
92
+ )
93
+ from nltk.classify.megam import call_megam, config_megam
94
+ from nltk.classify.naivebayes import NaiveBayesClassifier
95
+ from nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier
96
+ from nltk.classify.rte_classify import RTEFeatureExtractor, rte_classifier, rte_features
97
+ from nltk.classify.scikitlearn import SklearnClassifier
98
+ from nltk.classify.senna import Senna
99
+ from nltk.classify.textcat import TextCat
100
+ from nltk.classify.util import accuracy, apply_features, log_likelihood
101
+ from nltk.classify.weka import WekaClassifier, config_weka
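As the docstring above notes, training sets are lists of ``(featuredict, label)`` tuples. A minimal end-to-end sketch with one of the classifiers re-exported here (the toy documents and labels are invented):

    from nltk.classify import NaiveBayesClassifier, accuracy

    def document_features(document):
        # Same idea as the document_features detector in the docstring above.
        return {"contains-word(%s)" % w: True for w in document}

    train_set = [
        (document_features("the final score was tied".split()), "sports"),
        (document_features("the home team won the match".split()), "sports"),
        (document_features("the new vaccine trial results".split()), "health"),
        (document_features("doctors recommend a balanced diet".split()), "health"),
    ]
    test_set = [(document_features("the match was won".split()), "sports")]

    classifier = NaiveBayesClassifier.train(train_set)
    print(classifier.classify(document_features("doctors ran a new trial".split())))
    print(accuracy(classifier, test_set))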
lib/python3.10/site-packages/nltk/classify/api.py ADDED
@@ -0,0 +1,195 @@
1
+ # Natural Language Toolkit: Classifier Interface
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Interfaces for labeling tokens with category labels (or "class labels").
11
+
12
+ ``ClassifierI`` is a standard interface for "single-category
13
+ classification", in which the set of categories is known, the number
14
+ of categories is finite, and each text belongs to exactly one
15
+ category.
16
+
17
+ ``MultiClassifierI`` is a standard interface for "multi-category
18
+ classification", which is like single-category classification except
19
+ that each text belongs to zero or more categories.
20
+ """
21
+ from nltk.internals import overridden
22
+
23
+ ##//////////////////////////////////////////////////////
24
+ # { Classification Interfaces
25
+ ##//////////////////////////////////////////////////////
26
+
27
+
28
+ class ClassifierI:
29
+ """
30
+ A processing interface for labeling tokens with a single category
31
+ label (or "class"). Labels are typically strs or
32
+ ints, but can be any immutable type. The set of labels
33
+ that the classifier chooses from must be fixed and finite.
34
+
35
+ Subclasses must define:
36
+ - ``labels()``
37
+ - either ``classify()`` or ``classify_many()`` (or both)
38
+
39
+ Subclasses may define:
40
+ - either ``prob_classify()`` or ``prob_classify_many()`` (or both)
41
+ """
42
+
43
+ def labels(self):
44
+ """
45
+ :return: the list of category labels used by this classifier.
46
+ :rtype: list of (immutable)
47
+ """
48
+ raise NotImplementedError()
49
+
50
+ def classify(self, featureset):
51
+ """
52
+ :return: the most appropriate label for the given featureset.
53
+ :rtype: label
54
+ """
55
+ if overridden(self.classify_many):
56
+ return self.classify_many([featureset])[0]
57
+ else:
58
+ raise NotImplementedError()
59
+
60
+ def prob_classify(self, featureset):
61
+ """
62
+ :return: a probability distribution over labels for the given
63
+ featureset.
64
+ :rtype: ProbDistI
65
+ """
66
+ if overridden(self.prob_classify_many):
67
+ return self.prob_classify_many([featureset])[0]
68
+ else:
69
+ raise NotImplementedError()
70
+
71
+ def classify_many(self, featuresets):
72
+ """
73
+ Apply ``self.classify()`` to each element of ``featuresets``. I.e.:
74
+
75
+ return [self.classify(fs) for fs in featuresets]
76
+
77
+ :rtype: list(label)
78
+ """
79
+ return [self.classify(fs) for fs in featuresets]
80
+
81
+ def prob_classify_many(self, featuresets):
82
+ """
83
+ Apply ``self.prob_classify()`` to each element of ``featuresets``. I.e.:
84
+
85
+ return [self.prob_classify(fs) for fs in featuresets]
86
+
87
+ :rtype: list(ProbDistI)
88
+ """
89
+ return [self.prob_classify(fs) for fs in featuresets]
90
+
91
+
92
+ class MultiClassifierI:
93
+ """
94
+ A processing interface for labeling tokens with zero or more
95
+ category labels (or "labels"). Labels are typically strs
96
+ or ints, but can be any immutable type. The set of labels
97
+ that the multi-classifier chooses from must be fixed and finite.
98
+
99
+ Subclasses must define:
100
+ - ``labels()``
101
+ - either ``classify()`` or ``classify_many()`` (or both)
102
+
103
+ Subclasses may define:
104
+ - either ``prob_classify()`` or ``prob_classify_many()`` (or both)
105
+ """
106
+
107
+ def labels(self):
108
+ """
109
+ :return: the list of category labels used by this classifier.
110
+ :rtype: list of (immutable)
111
+ """
112
+ raise NotImplementedError()
113
+
114
+ def classify(self, featureset):
115
+ """
116
+ :return: the most appropriate set of labels for the given featureset.
117
+ :rtype: set(label)
118
+ """
119
+ if overridden(self.classify_many):
120
+ return self.classify_many([featureset])[0]
121
+ else:
122
+ raise NotImplementedError()
123
+
124
+ def prob_classify(self, featureset):
125
+ """
126
+ :return: a probability distribution over sets of labels for the
127
+ given featureset.
128
+ :rtype: ProbDistI
129
+ """
130
+ if overridden(self.prob_classify_many):
131
+ return self.prob_classify_many([featureset])[0]
132
+ else:
133
+ raise NotImplementedError()
134
+
135
+ def classify_many(self, featuresets):
136
+ """
137
+ Apply ``self.classify()`` to each element of ``featuresets``. I.e.:
138
+
139
+ return [self.classify(fs) for fs in featuresets]
140
+
141
+ :rtype: list(set(label))
142
+ """
143
+ return [self.classify(fs) for fs in featuresets]
144
+
145
+ def prob_classify_many(self, featuresets):
146
+ """
147
+ Apply ``self.prob_classify()`` to each element of ``featuresets``. I.e.:
148
+
149
+ return [self.prob_classify(fs) for fs in featuresets]
150
+
151
+ :rtype: list(ProbDistI)
152
+ """
153
+ return [self.prob_classify(fs) for fs in featuresets]
154
+
155
+
156
+ # # [XX] IN PROGRESS:
157
+ # class SequenceClassifierI:
158
+ # """
159
+ # A processing interface for labeling sequences of tokens with a
160
+ # single category label (or "class"). Labels are typically
161
+ # strs or ints, but can be any immutable type. The set
162
+ # of labels that the classifier chooses from must be fixed and
163
+ # finite.
164
+ # """
165
+ # def labels(self):
166
+ # """
167
+ # :return: the list of category labels used by this classifier.
168
+ # :rtype: list of (immutable)
169
+ # """
170
+ # raise NotImplementedError()
171
+
172
+ # def prob_classify(self, featureset):
173
+ # """
174
+ # Return a probability distribution over labels for the given
175
+ # featureset.
176
+
177
+ # If ``featureset`` is a list of featuresets, then return a
178
+ # corresponding list containing the probability distribution
179
+ # over labels for each of the given featuresets, where the
180
+ # *i*\ th element of this list is the most appropriate label for
181
+ # the *i*\ th element of ``featuresets``.
182
+ # """
183
+ # raise NotImplementedError()
184
+
185
+ # def classify(self, featureset):
186
+ # """
187
+ # Return the most appropriate label for the given featureset.
188
+
189
+ # If ``featureset`` is a list of featuresets, then return a
190
+ # corresponding list containing the most appropriate label for
191
+ # each of the given featuresets, where the *i*\ th element of
192
+ # this list is the most appropriate label for the *i*\ th element
193
+ # of ``featuresets``.
194
+ # """
195
+ # raise NotImplementedError()
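For reference, a minimal ``ClassifierI`` implementation only needs ``labels()`` and ``classify()``; the inherited ``classify_many()`` then works for free. The rule below is invented purely to illustrate the contract:

    from nltk.classify.api import ClassifierI

    class ThresholdClassifier(ClassifierI):
        """Toy classifier: labels a featureset by a hypothetical 'length' feature."""

        def labels(self):
            return ["short", "long"]

        def classify(self, featureset):
            # Invented rule: anything longer than 5 is "long".
            return "long" if featureset.get("length", 0) > 5 else "short"

    clf = ThresholdClassifier()
    print(clf.classify({"length": 3}))         # 'short'
    print(clf.classify_many([{"length": 9}]))  # ['long']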
lib/python3.10/site-packages/nltk/classify/decisiontree.py ADDED
@@ -0,0 +1,349 @@
1
+ # Natural Language Toolkit: Decision Tree Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A classifier model that decides which label to assign to a token on
10
+ the basis of a tree structure, where branches correspond to conditions
11
+ on feature values, and leaves correspond to label assignments.
12
+ """
13
+
14
+ from collections import defaultdict
15
+
16
+ from nltk.classify.api import ClassifierI
17
+ from nltk.probability import FreqDist, MLEProbDist, entropy
18
+
19
+
20
+ class DecisionTreeClassifier(ClassifierI):
21
+ def __init__(self, label, feature_name=None, decisions=None, default=None):
22
+ """
23
+ :param label: The most likely label for tokens that reach
24
+ this node in the decision tree. If this decision tree
25
+ has no children, then this label will be assigned to
26
+ any token that reaches this decision tree.
27
+ :param feature_name: The name of the feature that this
28
+ decision tree selects for.
29
+ :param decisions: A dictionary mapping from feature values
30
+ for the feature identified by ``feature_name`` to
31
+ child decision trees.
32
+ :param default: The child that will be used if the value of
33
+ feature ``feature_name`` does not match any of the keys in
34
+ ``decisions``. This is used when constructing binary
35
+ decision trees.
36
+ """
37
+ self._label = label
38
+ self._fname = feature_name
39
+ self._decisions = decisions
40
+ self._default = default
41
+
42
+ def labels(self):
43
+ labels = [self._label]
44
+ if self._decisions is not None:
45
+ for dt in self._decisions.values():
46
+ labels.extend(dt.labels())
47
+ if self._default is not None:
48
+ labels.extend(self._default.labels())
49
+ return list(set(labels))
50
+
51
+ def classify(self, featureset):
52
+ # Decision leaf:
53
+ if self._fname is None:
54
+ return self._label
55
+
56
+ # Decision tree:
57
+ fval = featureset.get(self._fname)
58
+ if fval in self._decisions:
59
+ return self._decisions[fval].classify(featureset)
60
+ elif self._default is not None:
61
+ return self._default.classify(featureset)
62
+ else:
63
+ return self._label
64
+
65
+ def error(self, labeled_featuresets):
66
+ errors = 0
67
+ for featureset, label in labeled_featuresets:
68
+ if self.classify(featureset) != label:
69
+ errors += 1
70
+ return errors / len(labeled_featuresets)
71
+
72
+ def pretty_format(self, width=70, prefix="", depth=4):
73
+ """
74
+ Return a string containing a pretty-printed version of this
75
+ decision tree. Each line in this string corresponds to a
76
+ single decision tree node or leaf, and indentation is used to
77
+ display the structure of the decision tree.
78
+ """
79
+ # [xx] display default!!
80
+ if self._fname is None:
81
+ n = width - len(prefix) - 15
82
+ return "{}{} {}\n".format(prefix, "." * n, self._label)
83
+ s = ""
84
+ for i, (fval, result) in enumerate(
85
+ sorted(
86
+ self._decisions.items(),
87
+ key=lambda item: (item[0] in [None, False, True], str(item[0]).lower()),
88
+ )
89
+ ):
90
+ hdr = f"{prefix}{self._fname}={fval}? "
91
+ n = width - 15 - len(hdr)
92
+ s += "{}{} {}\n".format(hdr, "." * (n), result._label)
93
+ if result._fname is not None and depth > 1:
94
+ s += result.pretty_format(width, prefix + " ", depth - 1)
95
+ if self._default is not None:
96
+ n = width - len(prefix) - 21
97
+ s += "{}else: {} {}\n".format(prefix, "." * n, self._default._label)
98
+ if self._default._fname is not None and depth > 1:
99
+ s += self._default.pretty_format(width, prefix + " ", depth - 1)
100
+ return s
101
+
102
+ def pseudocode(self, prefix="", depth=4):
103
+ """
104
+ Return a string representation of this decision tree that
105
+ expresses the decisions it makes as a nested set of pseudocode
106
+ if statements.
107
+ """
108
+ if self._fname is None:
109
+ return f"{prefix}return {self._label!r}\n"
110
+ s = ""
111
+ for (fval, result) in sorted(
112
+ self._decisions.items(),
113
+ key=lambda item: (item[0] in [None, False, True], str(item[0]).lower()),
114
+ ):
115
+ s += f"{prefix}if {self._fname} == {fval!r}: "
116
+ if result._fname is not None and depth > 1:
117
+ s += "\n" + result.pseudocode(prefix + " ", depth - 1)
118
+ else:
119
+ s += f"return {result._label!r}\n"
120
+ if self._default is not None:
121
+ if len(self._decisions) == 1:
122
+ s += "{}if {} != {!r}: ".format(
123
+ prefix, self._fname, list(self._decisions.keys())[0]
124
+ )
125
+ else:
126
+ s += f"{prefix}else: "
127
+ if self._default._fname is not None and depth > 1:
128
+ s += "\n" + self._default.pseudocode(prefix + " ", depth - 1)
129
+ else:
130
+ s += f"return {self._default._label!r}\n"
131
+ return s
132
+
133
+ def __str__(self):
134
+ return self.pretty_format()
135
+
136
+ @staticmethod
137
+ def train(
138
+ labeled_featuresets,
139
+ entropy_cutoff=0.05,
140
+ depth_cutoff=100,
141
+ support_cutoff=10,
142
+ binary=False,
143
+ feature_values=None,
144
+ verbose=False,
145
+ ):
146
+ """
147
+ :param binary: If true, then treat all feature/value pairs as
148
+ individual binary features, rather than using a single n-way
149
+ branch for each feature.
150
+ """
151
+ # Collect a list of all feature names.
152
+ feature_names = set()
153
+ for featureset, label in labeled_featuresets:
154
+ for fname in featureset:
155
+ feature_names.add(fname)
156
+
157
+ # Collect a list of the values each feature can take.
158
+ if feature_values is None and binary:
159
+ feature_values = defaultdict(set)
160
+ for featureset, label in labeled_featuresets:
161
+ for fname, fval in featureset.items():
162
+ feature_values[fname].add(fval)
163
+
164
+ # Start with a stump.
165
+ if not binary:
166
+ tree = DecisionTreeClassifier.best_stump(
167
+ feature_names, labeled_featuresets, verbose
168
+ )
169
+ else:
170
+ tree = DecisionTreeClassifier.best_binary_stump(
171
+ feature_names, labeled_featuresets, feature_values, verbose
172
+ )
173
+
174
+ # Refine the stump.
175
+ tree.refine(
176
+ labeled_featuresets,
177
+ entropy_cutoff,
178
+ depth_cutoff - 1,
179
+ support_cutoff,
180
+ binary,
181
+ feature_values,
182
+ verbose,
183
+ )
184
+
185
+ # Return it
186
+ return tree
187
+
188
+ @staticmethod
189
+ def leaf(labeled_featuresets):
190
+ label = FreqDist(label for (featureset, label) in labeled_featuresets).max()
191
+ return DecisionTreeClassifier(label)
192
+
193
+ @staticmethod
194
+ def stump(feature_name, labeled_featuresets):
195
+ label = FreqDist(label for (featureset, label) in labeled_featuresets).max()
196
+
197
+ # Find the best label for each value.
198
+ freqs = defaultdict(FreqDist) # freq(label|value)
199
+ for featureset, label in labeled_featuresets:
200
+ feature_value = featureset.get(feature_name)
201
+ freqs[feature_value][label] += 1
202
+
203
+ decisions = {val: DecisionTreeClassifier(freqs[val].max()) for val in freqs}
204
+ return DecisionTreeClassifier(label, feature_name, decisions)
205
+
206
+ def refine(
207
+ self,
208
+ labeled_featuresets,
209
+ entropy_cutoff,
210
+ depth_cutoff,
211
+ support_cutoff,
212
+ binary=False,
213
+ feature_values=None,
214
+ verbose=False,
215
+ ):
216
+ if len(labeled_featuresets) <= support_cutoff:
217
+ return
218
+ if self._fname is None:
219
+ return
220
+ if depth_cutoff <= 0:
221
+ return
222
+ for fval in self._decisions:
223
+ fval_featuresets = [
224
+ (featureset, label)
225
+ for (featureset, label) in labeled_featuresets
226
+ if featureset.get(self._fname) == fval
227
+ ]
228
+
229
+ label_freqs = FreqDist(label for (featureset, label) in fval_featuresets)
230
+ if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
231
+ self._decisions[fval] = DecisionTreeClassifier.train(
232
+ fval_featuresets,
233
+ entropy_cutoff,
234
+ depth_cutoff,
235
+ support_cutoff,
236
+ binary,
237
+ feature_values,
238
+ verbose,
239
+ )
240
+ if self._default is not None:
241
+ default_featuresets = [
242
+ (featureset, label)
243
+ for (featureset, label) in labeled_featuresets
244
+ if featureset.get(self._fname) not in self._decisions
245
+ ]
246
+ label_freqs = FreqDist(label for (featureset, label) in default_featuresets)
247
+ if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
248
+ self._default = DecisionTreeClassifier.train(
249
+ default_featuresets,
250
+ entropy_cutoff,
251
+ depth_cutoff,
252
+ support_cutoff,
253
+ binary,
254
+ feature_values,
255
+ verbose,
256
+ )
257
+
258
+ @staticmethod
259
+ def best_stump(feature_names, labeled_featuresets, verbose=False):
260
+ best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
261
+ best_error = best_stump.error(labeled_featuresets)
262
+ for fname in feature_names:
263
+ stump = DecisionTreeClassifier.stump(fname, labeled_featuresets)
264
+ stump_error = stump.error(labeled_featuresets)
265
+ if stump_error < best_error:
266
+ best_error = stump_error
267
+ best_stump = stump
268
+ if verbose:
269
+ print(
270
+ "best stump for {:6d} toks uses {:20} err={:6.4f}".format(
271
+ len(labeled_featuresets), best_stump._fname, best_error
272
+ )
273
+ )
274
+ return best_stump
275
+
276
+ @staticmethod
277
+ def binary_stump(feature_name, feature_value, labeled_featuresets):
278
+ label = FreqDist(label for (featureset, label) in labeled_featuresets).max()
279
+
280
+ # Find the best label for each value.
281
+ pos_fdist = FreqDist()
282
+ neg_fdist = FreqDist()
283
+ for featureset, label in labeled_featuresets:
284
+ if featureset.get(feature_name) == feature_value:
285
+ pos_fdist[label] += 1
286
+ else:
287
+ neg_fdist[label] += 1
288
+
289
+ decisions = {}
290
+ default = label
291
+ # But hopefully we have observations!
292
+ if pos_fdist.N() > 0:
293
+ decisions = {feature_value: DecisionTreeClassifier(pos_fdist.max())}
294
+ if neg_fdist.N() > 0:
295
+ default = DecisionTreeClassifier(neg_fdist.max())
296
+
297
+ return DecisionTreeClassifier(label, feature_name, decisions, default)
298
+
299
+ @staticmethod
300
+ def best_binary_stump(
301
+ feature_names, labeled_featuresets, feature_values, verbose=False
302
+ ):
303
+ best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
304
+ best_error = best_stump.error(labeled_featuresets)
305
+ for fname in feature_names:
306
+ for fval in feature_values[fname]:
307
+ stump = DecisionTreeClassifier.binary_stump(
308
+ fname, fval, labeled_featuresets
309
+ )
310
+ stump_error = stump.error(labeled_featuresets)
311
+ if stump_error < best_error:
312
+ best_error = stump_error
313
+ best_stump = stump
314
+ if verbose:
315
+ if best_stump._decisions:
316
+ descr = "{}={}".format(
317
+ best_stump._fname, list(best_stump._decisions.keys())[0]
318
+ )
319
+ else:
320
+ descr = "(default)"
321
+ print(
322
+ "best stump for {:6d} toks uses {:20} err={:6.4f}".format(
323
+ len(labeled_featuresets), descr, best_error
324
+ )
325
+ )
326
+ return best_stump
327
+
328
+
329
+ ##//////////////////////////////////////////////////////
330
+ ## Demo
331
+ ##//////////////////////////////////////////////////////
332
+
333
+
334
+ def f(x):
335
+ return DecisionTreeClassifier.train(x, binary=True, verbose=True)
336
+
337
+
338
+ def demo():
339
+ from nltk.classify.util import binary_names_demo_features, names_demo
340
+
341
+ classifier = names_demo(
342
+ f, binary_names_demo_features # DecisionTreeClassifier.train,
343
+ )
344
+ print(classifier.pretty_format(depth=7))
345
+ print(classifier.pseudocode(depth=7))
346
+
347
+
348
+ if __name__ == "__main__":
349
+ demo()
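Besides the names-corpus demo above, the classifier can be exercised directly on hand-built featuresets; a small sketch with invented toy data:

    from nltk.classify.decisiontree import DecisionTreeClassifier

    # Invented toy task: guess a fruit from two categorical features.
    train_set = [
        ({"color": "red", "size": "small"}, "cherry"),
        ({"color": "red", "size": "big"}, "apple"),
        ({"color": "green", "size": "big"}, "apple"),
        ({"color": "yellow", "size": "big"}, "banana"),
    ]

    # Lower the cutoffs so the tree grows past a single stump on this tiny set.
    tree = DecisionTreeClassifier.train(train_set, entropy_cutoff=0, support_cutoff=0)
    print(tree.classify({"color": "red", "size": "big"}))
    print(tree.pretty_format())
    print(tree.pseudocode(depth=4))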
lib/python3.10/site-packages/nltk/classify/maxent.py ADDED
@@ -0,0 +1,1569 @@
1
+ # Natural Language Toolkit: Maximum Entropy Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Dmitry Chichkov <[email protected]> (TypedMaxentFeatureEncoding)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A classifier model based on maximum entropy modeling framework. This
11
+ framework considers all of the probability distributions that are
12
+ empirically consistent with the training data; and chooses the
13
+ distribution with the highest entropy. A probability distribution is
14
+ "empirically consistent" with a set of training data if its estimated
15
+ frequency with which a class and a feature vector value co-occur is
16
+ equal to the actual frequency in the data.
17
+
18
+ Terminology: 'feature'
19
+ ======================
20
+ The term *feature* is usually used to refer to some property of an
21
+ unlabeled token. For example, when performing word sense
22
+ disambiguation, we might define a ``'prevword'`` feature whose value is
23
+ the word preceding the target word. However, in the context of
24
+ maxent modeling, the term *feature* is typically used to refer to a
25
+ property of a "labeled" token. In order to prevent confusion, we
26
+ will introduce two distinct terms to disambiguate these two different
27
+ concepts:
28
+
29
+ - An "input-feature" is a property of an unlabeled token.
30
+ - A "joint-feature" is a property of a labeled token.
31
+
32
+ In the rest of the ``nltk.classify`` module, the term "features" is
33
+ used to refer to what we will call "input-features" in this module.
34
+
35
+ In literature that describes and discusses maximum entropy models,
36
+ input-features are typically called "contexts", and joint-features
37
+ are simply referred to as "features".
38
+
39
+ Converting Input-Features to Joint-Features
40
+ -------------------------------------------
41
+ In maximum entropy models, joint-features are required to have numeric
42
+ values. Typically, each input-feature ``input_feat`` is mapped to a
43
+ set of joint-features of the form:
44
+
45
+ | joint_feat(token, label) = { 1 if input_feat(token) == feat_val
46
+ | { and label == some_label
47
+ | {
48
+ | { 0 otherwise
49
+
50
+ For all values of ``feat_val`` and ``some_label``. This mapping is
51
+ performed by classes that implement the ``MaxentFeatureEncodingI``
52
+ interface.
53
+ """
54
+ try:
55
+ import numpy
56
+ except ImportError:
57
+ pass
58
+
59
+ import os
60
+ import tempfile
61
+ from collections import defaultdict
62
+
63
+ from nltk.classify.api import ClassifierI
64
+ from nltk.classify.megam import call_megam, parse_megam_weights, write_megam_file
65
+ from nltk.classify.tadm import call_tadm, parse_tadm_weights, write_tadm_file
66
+ from nltk.classify.util import CutoffChecker, accuracy, log_likelihood
67
+ from nltk.data import gzip_open_unicode
68
+ from nltk.probability import DictionaryProbDist
69
+ from nltk.util import OrderedDict
70
+
71
+ __docformat__ = "epytext en"
72
+
73
+ ######################################################################
74
+ # { Classifier Model
75
+ ######################################################################
76
+
77
+
78
+ class MaxentClassifier(ClassifierI):
79
+ """
80
+ A maximum entropy classifier (also known as a "conditional
81
+ exponential classifier"). This classifier is parameterized by a
82
+ set of "weights", which are used to combine the joint-features
83
+ that are generated from a featureset by an "encoding". In
84
+ particular, the encoding maps each ``(featureset, label)`` pair to
85
+ a vector. The probability of each label is then computed using
86
+ the following equation::
87
+
88
+ dotprod(weights, encode(fs,label))
89
+ prob(label|fs) = ---------------------------------------------------
90
+ sum(dotprod(weights, encode(fs,l)) for l in labels)
91
+
92
+ Where ``dotprod`` is the dot product::
93
+
94
+ dotprod(a,b) = sum(x*y for (x,y) in zip(a,b))
95
+ """
96
+
97
+ def __init__(self, encoding, weights, logarithmic=True):
98
+ """
99
+ Construct a new maxent classifier model. Typically, new
100
+ classifier models are created using the ``train()`` method.
101
+
102
+ :type encoding: MaxentFeatureEncodingI
103
+ :param encoding: An encoding that is used to convert the
104
+ featuresets that are given to the ``classify`` method into
105
+ joint-feature vectors, which are used by the maxent
106
+ classifier model.
107
+
108
+ :type weights: list of float
109
+ :param weights: The feature weight vector for this classifier.
110
+
111
+ :type logarithmic: bool
112
+ :param logarithmic: If false, then use non-logarithmic weights.
113
+ """
114
+ self._encoding = encoding
115
+ self._weights = weights
116
+ self._logarithmic = logarithmic
117
+ # self._logarithmic = False
118
+ assert encoding.length() == len(weights)
119
+
120
+ def labels(self):
121
+ return self._encoding.labels()
122
+
123
+ def set_weights(self, new_weights):
124
+ """
125
+ Set the feature weight vector for this classifier.
126
+ :param new_weights: The new feature weight vector.
127
+ :type new_weights: list of float
128
+ """
129
+ self._weights = new_weights
130
+ assert self._encoding.length() == len(new_weights)
131
+
132
+ def weights(self):
133
+ """
134
+ :return: The feature weight vector for this classifier.
135
+ :rtype: list of float
136
+ """
137
+ return self._weights
138
+
139
+ def classify(self, featureset):
140
+ return self.prob_classify(featureset).max()
141
+
142
+ def prob_classify(self, featureset):
143
+ prob_dict = {}
144
+ for label in self._encoding.labels():
145
+ feature_vector = self._encoding.encode(featureset, label)
146
+
147
+ if self._logarithmic:
148
+ total = 0.0
149
+ for (f_id, f_val) in feature_vector:
150
+ total += self._weights[f_id] * f_val
151
+ prob_dict[label] = total
152
+
153
+ else:
154
+ prod = 1.0
155
+ for (f_id, f_val) in feature_vector:
156
+ prod *= self._weights[f_id] ** f_val
157
+ prob_dict[label] = prod
158
+
159
+ # Normalize the dictionary to give a probability distribution
160
+ return DictionaryProbDist(prob_dict, log=self._logarithmic, normalize=True)
161
+
162
+ def explain(self, featureset, columns=4):
163
+ """
164
+ Print a table showing the effect of each of the features in
165
+ the given feature set, and how they combine to determine the
166
+ probabilities of each label for that featureset.
167
+ """
168
+ descr_width = 50
169
+ TEMPLATE = " %-" + str(descr_width - 2) + "s%s%8.3f"
170
+
171
+ pdist = self.prob_classify(featureset)
172
+ labels = sorted(pdist.samples(), key=pdist.prob, reverse=True)
173
+ labels = labels[:columns]
174
+ print(
175
+ " Feature".ljust(descr_width)
176
+ + "".join("%8s" % (("%s" % l)[:7]) for l in labels)
177
+ )
178
+ print(" " + "-" * (descr_width - 2 + 8 * len(labels)))
179
+ sums = defaultdict(int)
180
+ for i, label in enumerate(labels):
181
+ feature_vector = self._encoding.encode(featureset, label)
182
+ feature_vector.sort(
183
+ key=lambda fid__: abs(self._weights[fid__[0]]), reverse=True
184
+ )
185
+ for (f_id, f_val) in feature_vector:
186
+ if self._logarithmic:
187
+ score = self._weights[f_id] * f_val
188
+ else:
189
+ score = self._weights[f_id] ** f_val
190
+ descr = self._encoding.describe(f_id)
191
+ descr = descr.split(" and label is ")[0] # hack
192
+ descr += " (%s)" % f_val # hack
193
+ if len(descr) > 47:
194
+ descr = descr[:44] + "..."
195
+ print(TEMPLATE % (descr, i * 8 * " ", score))
196
+ sums[label] += score
197
+ print(" " + "-" * (descr_width - 1 + 8 * len(labels)))
198
+ print(
199
+ " TOTAL:".ljust(descr_width) + "".join("%8.3f" % sums[l] for l in labels)
200
+ )
201
+ print(
202
+ " PROBS:".ljust(descr_width)
203
+ + "".join("%8.3f" % pdist.prob(l) for l in labels)
204
+ )
205
+
206
+ def most_informative_features(self, n=10):
207
+ """
208
+ Generates the ranked list of informative features from most to least.
209
+ """
210
+ if hasattr(self, "_most_informative_features"):
211
+ return self._most_informative_features[:n]
212
+ else:
213
+ self._most_informative_features = sorted(
214
+ list(range(len(self._weights))),
215
+ key=lambda fid: abs(self._weights[fid]),
216
+ reverse=True,
217
+ )
218
+ return self._most_informative_features[:n]
219
+
220
+ def show_most_informative_features(self, n=10, show="all"):
221
+ """
222
+ :param show: all, neg, or pos (for negative-only or positive-only)
223
+ :type show: str
224
+ :param n: The no. of top features
225
+ :type n: int
226
+ """
227
+ # Use None to get the full list of ranked features.
228
+ fids = self.most_informative_features(None)
229
+ if show == "pos":
230
+ fids = [fid for fid in fids if self._weights[fid] > 0]
231
+ elif show == "neg":
232
+ fids = [fid for fid in fids if self._weights[fid] < 0]
233
+ for fid in fids[:n]:
234
+ print(f"{self._weights[fid]:8.3f} {self._encoding.describe(fid)}")
235
+
236
+ def __repr__(self):
237
+ return "<ConditionalExponentialClassifier: %d labels, %d features>" % (
238
+ len(self._encoding.labels()),
239
+ self._encoding.length(),
240
+ )
241
+
242
+ #: A list of the algorithm names that are accepted for the
243
+ #: ``train()`` method's ``algorithm`` parameter.
244
+ ALGORITHMS = ["GIS", "IIS", "MEGAM", "TADM"]
245
+
246
+ @classmethod
247
+ def train(
248
+ cls,
249
+ train_toks,
250
+ algorithm=None,
251
+ trace=3,
252
+ encoding=None,
253
+ labels=None,
254
+ gaussian_prior_sigma=0,
255
+ **cutoffs,
256
+ ):
257
+ """
258
+ Train a new maxent classifier based on the given corpus of
259
+ training samples. This classifier will have its weights
260
+ chosen to maximize entropy while remaining empirically
261
+ consistent with the training corpus.
262
+
263
+ :rtype: MaxentClassifier
264
+ :return: The new maxent classifier
265
+
266
+ :type train_toks: list
267
+ :param train_toks: Training data, represented as a list of
268
+ pairs, the first member of which is a featureset,
269
+ and the second of which is a classification label.
270
+
271
+ :type algorithm: str
272
+ :param algorithm: A case-insensitive string, specifying which
273
+ algorithm should be used to train the classifier. The
274
+ following algorithms are currently available.
275
+
276
+ - Iterative Scaling Methods: Generalized Iterative Scaling (``'GIS'``),
277
+ Improved Iterative Scaling (``'IIS'``)
278
+ - External Libraries (requiring megam):
279
+ LM-BFGS algorithm, with training performed by Megam (``'megam'``)
280
+
281
+ The default algorithm is ``'IIS'``.
282
+
283
+ :type trace: int
284
+ :param trace: The level of diagnostic tracing output to produce.
285
+ Higher values produce more verbose output.
286
+ :type encoding: MaxentFeatureEncodingI
287
+ :param encoding: A feature encoding, used to convert featuresets
288
+ into feature vectors. If none is specified, then a
289
+ ``BinaryMaxentFeatureEncoding`` will be built based on the
290
+ features that are attested in the training corpus.
291
+ :type labels: list(str)
292
+ :param labels: The set of possible labels. If none is given, then
293
+ the set of all labels attested in the training data will be
294
+ used instead.
295
+ :param gaussian_prior_sigma: The sigma value for a gaussian
296
+ prior on model weights. Currently, this is supported by
297
+ ``megam``. For other algorithms, its value is ignored.
298
+ :param cutoffs: Arguments specifying various conditions under
299
+ which the training should be halted. (Some of the cutoff
300
+ conditions are not supported by some algorithms.)
301
+
302
+ - ``max_iter=v``: Terminate after ``v`` iterations.
303
+ - ``min_ll=v``: Terminate after the negative average
304
+ log-likelihood drops under ``v``.
305
+ - ``min_lldelta=v``: Terminate if a single iteration improves
306
+ log likelihood by less than ``v``.
307
+ """
308
+ if algorithm is None:
309
+ algorithm = "iis"
310
+ for key in cutoffs:
311
+ if key not in (
312
+ "max_iter",
313
+ "min_ll",
314
+ "min_lldelta",
315
+ "max_acc",
316
+ "min_accdelta",
317
+ "count_cutoff",
318
+ "norm",
319
+ "explicit",
320
+ "bernoulli",
321
+ ):
322
+ raise TypeError("Unexpected keyword arg %r" % key)
323
+ algorithm = algorithm.lower()
324
+ if algorithm == "iis":
325
+ return train_maxent_classifier_with_iis(
326
+ train_toks, trace, encoding, labels, **cutoffs
327
+ )
328
+ elif algorithm == "gis":
329
+ return train_maxent_classifier_with_gis(
330
+ train_toks, trace, encoding, labels, **cutoffs
331
+ )
332
+ elif algorithm == "megam":
333
+ return train_maxent_classifier_with_megam(
334
+ train_toks, trace, encoding, labels, gaussian_prior_sigma, **cutoffs
335
+ )
336
+ elif algorithm == "tadm":
337
+ kwargs = cutoffs
338
+ kwargs["trace"] = trace
339
+ kwargs["encoding"] = encoding
340
+ kwargs["labels"] = labels
341
+ kwargs["gaussian_prior_sigma"] = gaussian_prior_sigma
342
+ return TadmMaxentClassifier.train(train_toks, **kwargs)
343
+ else:
344
+ raise ValueError("Unknown algorithm %s" % algorithm)
345
+
346
+
347
+ #: Alias for MaxentClassifier.
348
+ ConditionalExponentialClassifier = MaxentClassifier
349
+
350
+
351
+ ######################################################################
352
+ # { Feature Encodings
353
+ ######################################################################
354
+
355
+
356
+ class MaxentFeatureEncodingI:
357
+ """
358
+ A mapping that converts a set of input-feature values to a vector
359
+ of joint-feature values, given a label. This conversion is
360
+ necessary to translate featuresets into a format that can be used
361
+ by maximum entropy models.
362
+
363
+ The set of joint-features used by a given encoding is fixed, and
364
+ each index in the generated joint-feature vectors corresponds to a
365
+ single joint-feature. The length of the generated joint-feature
366
+ vectors is therefore constant (for a given encoding).
367
+
368
+ Because the joint-feature vectors generated by
369
+ ``MaxentFeatureEncodingI`` are typically very sparse, they are
370
+ represented as a list of ``(index, value)`` tuples, specifying the
371
+ value of each non-zero joint-feature.
372
+
373
+ Feature encodings are generally created using the ``train()``
374
+ method, which generates an appropriate encoding based on the
375
+ input-feature values and labels that are present in a given
376
+ corpus.
377
+ """
378
+
379
+ def encode(self, featureset, label):
380
+ """
381
+ Given a (featureset, label) pair, return the corresponding
382
+ vector of joint-feature values. This vector is represented as
383
+ a list of ``(index, value)`` tuples, specifying the value of
384
+ each non-zero joint-feature.
385
+
386
+ :type featureset: dict
387
+ :rtype: list(tuple(int, int))
388
+ """
389
+ raise NotImplementedError()
390
+
391
+ def length(self):
392
+ """
393
+ :return: The size of the fixed-length joint-feature vectors
394
+ that are generated by this encoding.
395
+ :rtype: int
396
+ """
397
+ raise NotImplementedError()
398
+
399
+ def labels(self):
400
+ """
401
+ :return: A list of the \"known labels\" -- i.e., all labels
402
+ ``l`` such that ``self.encode(fs,l)`` can be a nonzero
403
+ joint-feature vector for some value of ``fs``.
404
+ :rtype: list
405
+ """
406
+ raise NotImplementedError()
407
+
408
+ def describe(self, fid):
409
+ """
410
+ :return: A string describing the value of the joint-feature
411
+ whose index in the generated feature vectors is ``fid``.
412
+ :rtype: str
413
+ """
414
+ raise NotImplementedError()
415
+
416
+ def train(cls, train_toks):
417
+ """
418
+ Construct and return new feature encoding, based on a given
419
+ training corpus ``train_toks``.
420
+
421
+ :type train_toks: list(tuple(dict, str))
422
+ :param train_toks: Training data, represented as a list of
423
+ pairs, the first member of which is a feature dictionary,
424
+ and the second of which is a classification label.
425
+ """
426
+ raise NotImplementedError()
427
+
428
+
429
+ class FunctionBackedMaxentFeatureEncoding(MaxentFeatureEncodingI):
430
+ """
431
+ A feature encoding that calls a user-supplied function to map a
432
+ given featureset/label pair to a sparse joint-feature vector.
433
+ """
434
+
435
+ def __init__(self, func, length, labels):
436
+ """
437
+ Construct a new feature encoding based on the given function.
438
+
439
+ :type func: (callable)
440
+ :param func: A function that takes two arguments, a featureset
441
+ and a label, and returns the sparse joint feature vector
442
+ that encodes them::
443
+
444
+ func(featureset, label) -> feature_vector
445
+
446
+ This sparse joint feature vector (``feature_vector``) is a
447
+ list of ``(index,value)`` tuples.
448
+
449
+ :type length: int
450
+ :param length: The size of the fixed-length joint-feature
451
+ vectors that are generated by this encoding.
452
+
453
+ :type labels: list
454
+ :param labels: A list of the \"known labels\" for this
455
+ encoding -- i.e., all labels ``l`` such that
456
+ ``self.encode(fs,l)`` can be a nonzero joint-feature vector
457
+ for some value of ``fs``.
458
+ """
459
+ self._length = length
460
+ self._func = func
461
+ self._labels = labels
462
+
463
+ def encode(self, featureset, label):
464
+ return self._func(featureset, label)
465
+
466
+ def length(self):
467
+ return self._length
468
+
469
+ def labels(self):
470
+ return self._labels
471
+
472
+ def describe(self, fid):
473
+ return "no description available"
474
+
475
+
476
+ class BinaryMaxentFeatureEncoding(MaxentFeatureEncodingI):
477
+ """
478
+ A feature encoding that generates vectors containing a binary
479
+ joint-features of the form:
480
+
481
+ | joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
482
+ | {
483
+ | { 0 otherwise
484
+
485
+ Where ``fname`` is the name of an input-feature, ``fval`` is a value
486
+ for that input-feature, and ``label`` is a label.
487
+
488
+ Typically, these features are constructed based on a training
489
+ corpus, using the ``train()`` method. This method will create one
490
+ feature for each combination of ``fname``, ``fval``, and ``label``
491
+ that occurs at least once in the training corpus.
492
+
493
+ The ``unseen_features`` parameter can be used to add "unseen-value
494
+ features", which are used whenever an input feature has a value
495
+ that was not encountered in the training corpus. These features
496
+ have the form:
497
+
498
+ | joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
499
+ | { and l == label
500
+ | {
501
+ | { 0 otherwise
502
+
503
+ Where ``is_unseen(fname, fval)`` is true if the encoding does not
504
+ contain any joint features that are true when ``fs[fname]==fval``.
505
+
506
+ The ``alwayson_features`` parameter can be used to add "always-on
507
+ features", which have the form::
508
+
509
+ | joint_feat(fs, l) = { 1 if (l == label)
510
+ | {
511
+ | { 0 otherwise
512
+
513
+ These always-on features allow the maxent model to directly model
514
+ the prior probabilities of each label.
515
+ """
516
+
517
+ def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
518
+ """
519
+ :param labels: A list of the \"known labels\" for this encoding.
520
+
521
+ :param mapping: A dictionary mapping from ``(fname,fval,label)``
522
+ tuples to corresponding joint-feature indexes. These
523
+ indexes must be the set of integers from 0...len(mapping).
524
+ If ``mapping[fname,fval,label]=id``, then
525
+ ``self.encode(..., fname:fval, ..., label)[id]`` is 1;
526
+ otherwise, it is 0.
527
+
528
+ :param unseen_features: If true, then include unseen value
529
+ features in the generated joint-feature vectors.
530
+
531
+ :param alwayson_features: If true, then include always-on
532
+ features in the generated joint-feature vectors.
533
+ """
534
+ if set(mapping.values()) != set(range(len(mapping))):
535
+ raise ValueError(
536
+ "Mapping values must be exactly the "
537
+ "set of integers from 0...len(mapping)"
538
+ )
539
+
540
+ self._labels = list(labels)
541
+ """A list of attested labels."""
542
+
543
+ self._mapping = mapping
544
+ """dict mapping from (fname,fval,label) -> fid"""
545
+
546
+ self._length = len(mapping)
547
+ """The length of generated joint feature vectors."""
548
+
549
+ self._alwayson = None
550
+ """dict mapping from label -> fid"""
551
+
552
+ self._unseen = None
553
+ """dict mapping from fname -> fid"""
554
+
555
+ if alwayson_features:
556
+ self._alwayson = {
557
+ label: i + self._length for (i, label) in enumerate(labels)
558
+ }
559
+ self._length += len(self._alwayson)
560
+
561
+ if unseen_features:
562
+ fnames = {fname for (fname, fval, label) in mapping}
563
+ self._unseen = {fname: i + self._length for (i, fname) in enumerate(fnames)}
564
+ self._length += len(fnames)
565
+
566
+ def encode(self, featureset, label):
567
+ # Inherit docs.
568
+ encoding = []
569
+
570
+ # Convert input-features to joint-features:
571
+ for fname, fval in featureset.items():
572
+ # Known feature name & value:
573
+ if (fname, fval, label) in self._mapping:
574
+ encoding.append((self._mapping[fname, fval, label], 1))
575
+
576
+ # Otherwise, we might want to fire an "unseen-value feature".
577
+ elif self._unseen:
578
+ # Have we seen this fname/fval combination with any label?
579
+ for label2 in self._labels:
580
+ if (fname, fval, label2) in self._mapping:
581
+ break # we've seen this fname/fval combo
582
+ # We haven't -- fire the unseen-value feature
583
+ else:
584
+ if fname in self._unseen:
585
+ encoding.append((self._unseen[fname], 1))
586
+
587
+ # Add always-on features:
588
+ if self._alwayson and label in self._alwayson:
589
+ encoding.append((self._alwayson[label], 1))
590
+
591
+ return encoding
592
+
593
+ def describe(self, f_id):
594
+ # Inherit docs.
595
+ if not isinstance(f_id, int):
596
+ raise TypeError("describe() expected an int")
597
+ try:
598
+ self._inv_mapping
599
+ except AttributeError:
600
+ self._inv_mapping = [-1] * len(self._mapping)
601
+ for (info, i) in self._mapping.items():
602
+ self._inv_mapping[i] = info
603
+
604
+ if f_id < len(self._mapping):
605
+ (fname, fval, label) = self._inv_mapping[f_id]
606
+ return f"{fname}=={fval!r} and label is {label!r}"
607
+ elif self._alwayson and f_id in self._alwayson.values():
608
+ for (label, f_id2) in self._alwayson.items():
609
+ if f_id == f_id2:
610
+ return "label is %r" % label
611
+ elif self._unseen and f_id in self._unseen.values():
612
+ for (fname, f_id2) in self._unseen.items():
613
+ if f_id == f_id2:
614
+ return "%s is unseen" % fname
615
+ else:
616
+ raise ValueError("Bad feature id")
617
+
618
+ def labels(self):
619
+ # Inherit docs.
620
+ return self._labels
621
+
622
+ def length(self):
623
+ # Inherit docs.
624
+ return self._length
625
+
626
+ @classmethod
627
+ def train(cls, train_toks, count_cutoff=0, labels=None, **options):
628
+ """
629
+ Construct and return new feature encoding, based on a given
630
+ training corpus ``train_toks``. See the class description
631
+ ``BinaryMaxentFeatureEncoding`` for a description of the
632
+ joint-features that will be included in this encoding.
633
+
634
+ :type train_toks: list(tuple(dict, str))
635
+ :param train_toks: Training data, represented as a list of
636
+ pairs, the first member of which is a feature dictionary,
637
+ and the second of which is a classification label.
638
+
639
+ :type count_cutoff: int
640
+ :param count_cutoff: A cutoff value that is used to discard
641
+ rare joint-features. If a joint-feature's value is 1
642
+ fewer than ``count_cutoff`` times in the training corpus,
643
+ then that joint-feature is not included in the generated
644
+ encoding.
645
+
646
+ :type labels: list
647
+ :param labels: A list of labels that should be used by the
648
+ classifier. If not specified, then the set of labels
649
+ attested in ``train_toks`` will be used.
650
+
651
+ :param options: Extra parameters for the constructor, such as
652
+ ``unseen_features`` and ``alwayson_features``.
653
+ """
654
+ mapping = {} # maps (fname, fval, label) -> fid
655
+ seen_labels = set() # The set of labels we've encountered
656
+ count = defaultdict(int) # maps (fname, fval) -> count
657
+
658
+ for (tok, label) in train_toks:
659
+ if labels and label not in labels:
660
+ raise ValueError("Unexpected label %s" % label)
661
+ seen_labels.add(label)
662
+
663
+ # Record each of the features.
664
+ for (fname, fval) in tok.items():
665
+
666
+ # If a count cutoff is given, then only add a joint
667
+ # feature once the corresponding (fname, fval, label)
668
+ # tuple exceeds that cutoff.
669
+ count[fname, fval] += 1
670
+ if count[fname, fval] >= count_cutoff:
671
+ if (fname, fval, label) not in mapping:
672
+ mapping[fname, fval, label] = len(mapping)
673
+
674
+ if labels is None:
675
+ labels = seen_labels
676
+ return cls(labels, mapping, **options)
677
+
678
+
679
+ class GISEncoding(BinaryMaxentFeatureEncoding):
680
+ """
681
+ A binary feature encoding which adds one new joint-feature to the
682
+ joint-features defined by ``BinaryMaxentFeatureEncoding``: a
683
+ correction feature, whose value is chosen to ensure that the
684
+ sparse vector always sums to a constant non-negative number. This
685
+ new feature is used to ensure two preconditions for the GIS
686
+ training algorithm:
687
+
688
+ - At least one feature vector index must be nonzero for every
689
+ token.
690
+ - The feature vector must sum to a constant non-negative number
691
+ for every token.
692
+ """
693
+
694
+ def __init__(
695
+ self, labels, mapping, unseen_features=False, alwayson_features=False, C=None
696
+ ):
697
+ """
698
+ :param C: The correction constant. The value of the correction
699
+ feature is based on this value. In particular, its value is
700
+ ``C - sum([v for (f,v) in encoding])``.
701
+ :seealso: ``BinaryMaxentFeatureEncoding.__init__``
702
+ """
703
+ BinaryMaxentFeatureEncoding.__init__(
704
+ self, labels, mapping, unseen_features, alwayson_features
705
+ )
706
+ if C is None:
707
+ C = len({fname for (fname, fval, label) in mapping}) + 1
708
+ self._C = C
709
+
710
+ @property
711
+ def C(self):
712
+ """The non-negative constant that all encoded feature vectors
713
+ will sum to."""
714
+ return self._C
715
+
716
+ def encode(self, featureset, label):
717
+ # Get the basic encoding.
718
+ encoding = BinaryMaxentFeatureEncoding.encode(self, featureset, label)
719
+ base_length = BinaryMaxentFeatureEncoding.length(self)
720
+
721
+ # Add a correction feature.
722
+ total = sum(v for (f, v) in encoding)
723
+ if total >= self._C:
724
+ raise ValueError("Correction feature is not high enough!")
725
+ encoding.append((base_length, self._C - total))
726
+
727
+ # Return the result
728
+ return encoding
729
+
730
+ def length(self):
731
+ return BinaryMaxentFeatureEncoding.length(self) + 1
732
+
733
+ def describe(self, f_id):
734
+ if f_id == BinaryMaxentFeatureEncoding.length(self):
735
+ return "Correction feature (%s)" % self._C
736
+ else:
737
+ return BinaryMaxentFeatureEncoding.describe(self, f_id)
738
+
739
+
740
+ class TadmEventMaxentFeatureEncoding(BinaryMaxentFeatureEncoding):
741
+ def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
742
+ self._mapping = OrderedDict(mapping)
743
+ self._label_mapping = OrderedDict()
744
+ BinaryMaxentFeatureEncoding.__init__(
745
+ self, labels, self._mapping, unseen_features, alwayson_features
746
+ )
747
+
748
+ def encode(self, featureset, label):
749
+ encoding = []
750
+ for feature, value in featureset.items():
751
+ if (feature, label) not in self._mapping:
752
+ self._mapping[(feature, label)] = len(self._mapping)
753
+ if value not in self._label_mapping:
754
+ if not isinstance(value, int):
755
+ self._label_mapping[value] = len(self._label_mapping)
756
+ else:
757
+ self._label_mapping[value] = value
758
+ encoding.append(
759
+ (self._mapping[(feature, label)], self._label_mapping[value])
760
+ )
761
+ return encoding
762
+
763
+ def labels(self):
764
+ return self._labels
765
+
766
+ def describe(self, fid):
767
+ for (feature, label) in self._mapping:
768
+ if self._mapping[(feature, label)] == fid:
769
+ return (feature, label)
770
+
771
+ def length(self):
772
+ return len(self._mapping)
773
+
774
+ @classmethod
775
+ def train(cls, train_toks, count_cutoff=0, labels=None, **options):
776
+ mapping = OrderedDict()
777
+ if not labels:
778
+ labels = []
779
+
780
+ # This gets read twice, so compute the values in case it's lazy.
781
+ train_toks = list(train_toks)
782
+
783
+ for (featureset, label) in train_toks:
784
+ if label not in labels:
785
+ labels.append(label)
786
+
787
+ for (featureset, label) in train_toks:
788
+ for label in labels:
789
+ for feature in featureset:
790
+ if (feature, label) not in mapping:
791
+ mapping[(feature, label)] = len(mapping)
792
+
793
+ return cls(labels, mapping, **options)
794
+
795
+
796
+ class TypedMaxentFeatureEncoding(MaxentFeatureEncodingI):
797
+ """
798
+ A feature encoding that generates vectors containing integer,
799
+ float and binary joint-features of the form:
800
+
801
+ Binary (for string and boolean features):
802
+
803
+ | joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
804
+ | {
805
+ | { 0 otherwise
806
+
807
+ Value (for integer and float features):
808
+
809
+ | joint_feat(fs, l) = { fval if (fs[fname] == type(fval))
810
+ | { and (l == label)
811
+ | {
812
+ | { not encoded otherwise
813
+
814
+ Where ``fname`` is the name of an input-feature, ``fval`` is a value
815
+ for that input-feature, and ``label`` is a label.
816
+
817
+ Typically, these features are constructed based on a training
818
+ corpus, using the ``train()`` method.
819
+
820
+ For string and boolean features [type(fval) not in (int, float)]
821
+ this method will create one feature for each combination of
822
+ ``fname``, ``fval``, and ``label`` that occurs at least once in the
823
+ training corpus.
824
+
825
+ For integer and float features [type(fval) in (int, float)] this
826
+ method will create one feature for each combination of ``fname``
827
+ and ``label`` that occurs at least once in the training corpus.
828
+
829
+ For binary features the ``unseen_features`` parameter can be used
830
+ to add "unseen-value features", which are used whenever an input
831
+ feature has a value that was not encountered in the training
832
+ corpus. These features have the form:
833
+
834
+ | joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
835
+ | { and l == label
836
+ | {
837
+ | { 0 otherwise
838
+
839
+ Where ``is_unseen(fname, fval)`` is true if the encoding does not
840
+ contain any joint features that are true when ``fs[fname]==fval``.
841
+
842
+ The ``alwayson_features`` parameter can be used to add "always-on
843
+ features", which have the form:
844
+
845
+ | joint_feat(fs, l) = { 1 if (l == label)
846
+ | {
847
+ | { 0 otherwise
848
+
849
+ These always-on features allow the maxent model to directly model
850
+ the prior probabilities of each label.
851
+ """
852
+
853
+ def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
854
+ """
855
+ :param labels: A list of the \"known labels\" for this encoding.
856
+
857
+ :param mapping: A dictionary mapping from ``(fname,fval,label)``
858
+ tuples to corresponding joint-feature indexes. These
859
+ indexes must be the set of integers from 0...len(mapping).
860
+ If ``mapping[fname,fval,label]=id``, then
861
+ ``self.encode({..., fname:fval, ...}, label)[id]`` is 1;
862
+ otherwise, it is 0.
863
+
864
+ :param unseen_features: If true, then include unseen value
865
+ features in the generated joint-feature vectors.
866
+
867
+ :param alwayson_features: If true, then include always-on
868
+ features in the generated joint-feature vectors.
869
+ """
870
+ if set(mapping.values()) != set(range(len(mapping))):
871
+ raise ValueError(
872
+ "Mapping values must be exactly the "
873
+ "set of integers from 0...len(mapping)"
874
+ )
875
+
876
+ self._labels = list(labels)
877
+ """A list of attested labels."""
878
+
879
+ self._mapping = mapping
880
+ """dict mapping from (fname,fval,label) -> fid"""
881
+
882
+ self._length = len(mapping)
883
+ """The length of generated joint feature vectors."""
884
+
885
+ self._alwayson = None
886
+ """dict mapping from label -> fid"""
887
+
888
+ self._unseen = None
889
+ """dict mapping from fname -> fid"""
890
+
891
+ if alwayson_features:
892
+ self._alwayson = {
893
+ label: i + self._length for (i, label) in enumerate(labels)
894
+ }
895
+ self._length += len(self._alwayson)
896
+
897
+ if unseen_features:
898
+ fnames = {fname for (fname, fval, label) in mapping}
899
+ self._unseen = {fname: i + self._length for (i, fname) in enumerate(fnames)}
900
+ self._length += len(fnames)
901
+
902
+ def encode(self, featureset, label):
903
+ # Inherit docs.
904
+ encoding = []
905
+
906
+ # Convert input-features to joint-features:
907
+ for fname, fval in featureset.items():
908
+ if isinstance(fval, (int, float)):
909
+ # Known feature name & value:
910
+ if (fname, type(fval), label) in self._mapping:
911
+ encoding.append((self._mapping[fname, type(fval), label], fval))
912
+ else:
913
+ # Known feature name & value:
914
+ if (fname, fval, label) in self._mapping:
915
+ encoding.append((self._mapping[fname, fval, label], 1))
916
+
917
+ # Otherwise, we might want to fire an "unseen-value feature".
918
+ elif self._unseen:
919
+ # Have we seen this fname/fval combination with any label?
920
+ for label2 in self._labels:
921
+ if (fname, fval, label2) in self._mapping:
922
+ break # we've seen this fname/fval combo
923
+ # We haven't -- fire the unseen-value feature
924
+ else:
925
+ if fname in self._unseen:
926
+ encoding.append((self._unseen[fname], 1))
927
+
928
+ # Add always-on features:
929
+ if self._alwayson and label in self._alwayson:
930
+ encoding.append((self._alwayson[label], 1))
931
+
932
+ return encoding
933
+
934
+ def describe(self, f_id):
935
+ # Inherit docs.
936
+ if not isinstance(f_id, int):
937
+ raise TypeError("describe() expected an int")
938
+ try:
939
+ self._inv_mapping
940
+ except AttributeError:
941
+ self._inv_mapping = [-1] * len(self._mapping)
942
+ for (info, i) in self._mapping.items():
943
+ self._inv_mapping[i] = info
944
+
945
+ if f_id < len(self._mapping):
946
+ (fname, fval, label) = self._inv_mapping[f_id]
947
+ return f"{fname}=={fval!r} and label is {label!r}"
948
+ elif self._alwayson and f_id in self._alwayson.values():
949
+ for (label, f_id2) in self._alwayson.items():
950
+ if f_id == f_id2:
951
+ return "label is %r" % label
952
+ elif self._unseen and f_id in self._unseen.values():
953
+ for (fname, f_id2) in self._unseen.items():
954
+ if f_id == f_id2:
955
+ return "%s is unseen" % fname
956
+ else:
957
+ raise ValueError("Bad feature id")
958
+
959
+ def labels(self):
960
+ # Inherit docs.
961
+ return self._labels
962
+
963
+ def length(self):
964
+ # Inherit docs.
965
+ return self._length
966
+
967
+ @classmethod
968
+ def train(cls, train_toks, count_cutoff=0, labels=None, **options):
969
+ """
970
+ Construct and return new feature encoding, based on a given
971
+ training corpus ``train_toks``. See the class description
972
+ ``TypedMaxentFeatureEncoding`` for a description of the
973
+ joint-features that will be included in this encoding.
974
+
975
+ Note: recognized feature value types are (int, float); other
976
+ types are interpreted as regular binary features.
977
+
978
+ :type train_toks: list(tuple(dict, str))
979
+ :param train_toks: Training data, represented as a list of
980
+ pairs, the first member of which is a feature dictionary,
981
+ and the second of which is a classification label.
982
+
983
+ :type count_cutoff: int
984
+ :param count_cutoff: A cutoff value that is used to discard
985
+ rare joint-features. If a joint-feature occurs
986
+ fewer than ``count_cutoff`` times in the training corpus,
987
+ then that joint-feature is not included in the generated
988
+ encoding.
989
+
990
+ :type labels: list
991
+ :param labels: A list of labels that should be used by the
992
+ classifier. If not specified, then the set of labels
993
+ attested in ``train_toks`` will be used.
994
+
995
+ :param options: Extra parameters for the constructor, such as
996
+ ``unseen_features`` and ``alwayson_features``.
997
+ """
998
+ mapping = {} # maps (fname, fval, label) -> fid
999
+ seen_labels = set() # The set of labels we've encountered
1000
+ count = defaultdict(int) # maps (fname, fval) -> count
1001
+
1002
+ for (tok, label) in train_toks:
1003
+ if labels and label not in labels:
1004
+ raise ValueError("Unexpected label %s" % label)
1005
+ seen_labels.add(label)
1006
+
1007
+ # Record each of the features.
1008
+ for (fname, fval) in tok.items():
1009
+ if type(fval) in (int, float):
1010
+ fval = type(fval)
1011
+ # If a count cutoff is given, then only add a joint
1012
+ # feature once the corresponding (fname, fval, label)
1013
+ # tuple exceeds that cutoff.
1014
+ count[fname, fval] += 1
1015
+ if count[fname, fval] >= count_cutoff:
1016
+ if (fname, fval, label) not in mapping:
1017
+ mapping[fname, fval, label] = len(mapping)
1018
+
1019
+ if labels is None:
1020
+ labels = seen_labels
1021
+ return cls(labels, mapping, **options)
1022
+
1023
+
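A minimal usage sketch for the class above, assuming nltk (and numpy) are installed; the feature names, values and labels below are invented for illustration.

from nltk.classify.maxent import TypedMaxentFeatureEncoding

train_toks = [
    ({"length": 3, "suffix": "ed"}, "past"),
    ({"length": 5, "suffix": "ing"}, "present"),
]
encoding = TypedMaxentFeatureEncoding.train(train_toks)

# Integer/float features are encoded as (fid, value); string features as (fid, 1).
print(encoding.encode({"length": 4, "suffix": "ed"}, "past"))
print(encoding.describe(0))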
1024
+ ######################################################################
1025
+ # { Classifier Trainer: Generalized Iterative Scaling
1026
+ ######################################################################
1027
+
1028
+
1029
+ def train_maxent_classifier_with_gis(
1030
+ train_toks, trace=3, encoding=None, labels=None, **cutoffs
1031
+ ):
1032
+ """
1033
+ Train a new ``ConditionalExponentialClassifier``, using the given
1034
+ training samples, using the Generalized Iterative Scaling
1035
+ algorithm. This ``ConditionalExponentialClassifier`` will encode
1036
+ the model that maximizes entropy from all the models that are
1037
+ empirically consistent with ``train_toks``.
1038
+
1039
+ :see: ``train_maxent_classifier()`` for parameter descriptions.
1040
+ """
1041
+ cutoffs.setdefault("max_iter", 100)
1042
+ cutoffchecker = CutoffChecker(cutoffs)
1043
+
1044
+ # Construct an encoding from the training data.
1045
+ if encoding is None:
1046
+ encoding = GISEncoding.train(train_toks, labels=labels)
1047
+
1048
+ if not hasattr(encoding, "C"):
1049
+ raise TypeError(
1050
+ "The GIS algorithm requires an encoding that "
1051
+ "defines C (e.g., GISEncoding)."
1052
+ )
1053
+
1054
+ # Cinv is the inverse of the sum of each joint feature vector.
1055
+ # This controls the learning rate: higher Cinv (or lower C) gives
1056
+ # faster learning.
1057
+ Cinv = 1.0 / encoding.C
1058
+
1059
+ # Count how many times each feature occurs in the training data.
1060
+ empirical_fcount = calculate_empirical_fcount(train_toks, encoding)
1061
+
1062
+ # Check for any features that are not attested in train_toks.
1063
+ unattested = set(numpy.nonzero(empirical_fcount == 0)[0])
1064
+
1065
+ # Build the classifier. Start with weight=0 for each attested
1066
+ # feature, and weight=-infinity for each unattested feature.
1067
+ weights = numpy.zeros(len(empirical_fcount), "d")
1068
+ for fid in unattested:
1069
+ weights[fid] = numpy.NINF
1070
+ classifier = ConditionalExponentialClassifier(encoding, weights)
1071
+
1072
+ # Take the log of the empirical fcount.
1073
+ log_empirical_fcount = numpy.log2(empirical_fcount)
1074
+ del empirical_fcount
1075
+
1076
+ if trace > 0:
1077
+ print(" ==> Training (%d iterations)" % cutoffs["max_iter"])
1078
+ if trace > 2:
1079
+ print()
1080
+ print(" Iteration Log Likelihood Accuracy")
1081
+ print(" ---------------------------------------")
1082
+
1083
+ # Train the classifier.
1084
+ try:
1085
+ while True:
1086
+ if trace > 2:
1087
+ ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
1088
+ acc = cutoffchecker.acc or accuracy(classifier, train_toks)
1089
+ iternum = cutoffchecker.iter
1090
+ print(" %9d %14.5f %9.3f" % (iternum, ll, acc))
1091
+
1092
+ # Use the model to estimate the number of times each
1093
+ # feature should occur in the training data.
1094
+ estimated_fcount = calculate_estimated_fcount(
1095
+ classifier, train_toks, encoding
1096
+ )
1097
+
1098
+ # Take the log of estimated fcount (avoid taking log(0).)
1099
+ for fid in unattested:
1100
+ estimated_fcount[fid] += 1
1101
+ log_estimated_fcount = numpy.log2(estimated_fcount)
1102
+ del estimated_fcount
1103
+
1104
+ # Update the classifier weights
1105
+ weights = classifier.weights()
1106
+ weights += (log_empirical_fcount - log_estimated_fcount) * Cinv
1107
+ classifier.set_weights(weights)
1108
+
1109
+ # Check the log-likelihood & accuracy cutoffs.
1110
+ if cutoffchecker.check(classifier, train_toks):
1111
+ break
1112
+
1113
+ except KeyboardInterrupt:
1114
+ print(" Training stopped: keyboard interrupt")
1115
+ except:
1116
+ raise
1117
+
1118
+ if trace > 2:
1119
+ ll = log_likelihood(classifier, train_toks)
1120
+ acc = accuracy(classifier, train_toks)
1121
+ print(f" Final {ll:14.5f} {acc:9.3f}")
1122
+
1123
+ # Return the classifier.
1124
+ return classifier
1125
+
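An illustrative numpy sketch of the weight update performed in the loop above, with made-up counts; C stands in for encoding.C.

import numpy

C = 4.0                                     # maximum number of active features per event
empirical_fcount = numpy.array([6.0, 2.0, 9.0])
estimated_fcount = numpy.array([5.0, 3.0, 9.0])

weights = numpy.zeros(3)
weights += (numpy.log2(empirical_fcount) - numpy.log2(estimated_fcount)) / C
print(weights)   # under-predicted features get a positive nudge, over-predicted a negative one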
1126
+
1127
+ def calculate_empirical_fcount(train_toks, encoding):
1128
+ fcount = numpy.zeros(encoding.length(), "d")
1129
+
1130
+ for tok, label in train_toks:
1131
+ for (index, val) in encoding.encode(tok, label):
1132
+ fcount[index] += val
1133
+
1134
+ return fcount
1135
+
1136
+
1137
+ def calculate_estimated_fcount(classifier, train_toks, encoding):
1138
+ fcount = numpy.zeros(encoding.length(), "d")
1139
+
1140
+ for tok, label in train_toks:
1141
+ pdist = classifier.prob_classify(tok)
1142
+ for label in pdist.samples():
1143
+ prob = pdist.prob(label)
1144
+ for (fid, fval) in encoding.encode(tok, label):
1145
+ fcount[fid] += prob * fval
1146
+
1147
+ return fcount
1148
+
1149
+
1150
+ ######################################################################
1151
+ # { Classifier Trainer: Improved Iterative Scaling
1152
+ ######################################################################
1153
+
1154
+
1155
+ def train_maxent_classifier_with_iis(
1156
+ train_toks, trace=3, encoding=None, labels=None, **cutoffs
1157
+ ):
1158
+ """
1159
+ Train a new ``ConditionalExponentialClassifier``, using the given
1160
+ training samples, using the Improved Iterative Scaling algorithm.
1161
+ This ``ConditionalExponentialClassifier`` will encode the model
1162
+ that maximizes entropy from all the models that are empirically
1163
+ consistent with ``train_toks``.
1164
+
1165
+ :see: ``train_maxent_classifier()`` for parameter descriptions.
1166
+ """
1167
+ cutoffs.setdefault("max_iter", 100)
1168
+ cutoffchecker = CutoffChecker(cutoffs)
1169
+
1170
+ # Construct an encoding from the training data.
1171
+ if encoding is None:
1172
+ encoding = BinaryMaxentFeatureEncoding.train(train_toks, labels=labels)
1173
+
1174
+ # Count how many times each feature occurs in the training data.
1175
+ empirical_ffreq = calculate_empirical_fcount(train_toks, encoding) / len(train_toks)
1176
+
1177
+ # Find the nf map, and related variables nfarray and nfident.
1178
+ # nf is the sum of the features for a given labeled text.
1179
+ # nfmap compresses this sparse set of values to a dense list.
1180
+ # nfarray performs the reverse operation. nfident is
1181
+ # nfarray multiplied by an identity matrix.
1182
+ nfmap = calculate_nfmap(train_toks, encoding)
1183
+ nfarray = numpy.array(sorted(nfmap, key=nfmap.__getitem__), "d")
1184
+ nftranspose = numpy.reshape(nfarray, (len(nfarray), 1))
1185
+
1186
+ # Check for any features that are not attested in train_toks.
1187
+ unattested = set(numpy.nonzero(empirical_ffreq == 0)[0])
1188
+
1189
+ # Build the classifier. Start with weight=0 for each attested
1190
+ # feature, and weight=-infinity for each unattested feature.
1191
+ weights = numpy.zeros(len(empirical_ffreq), "d")
1192
+ for fid in unattested:
1193
+ weights[fid] = numpy.NINF
1194
+ classifier = ConditionalExponentialClassifier(encoding, weights)
1195
+
1196
+ if trace > 0:
1197
+ print(" ==> Training (%d iterations)" % cutoffs["max_iter"])
1198
+ if trace > 2:
1199
+ print()
1200
+ print(" Iteration Log Likelihood Accuracy")
1201
+ print(" ---------------------------------------")
1202
+
1203
+ # Train the classifier.
1204
+ try:
1205
+ while True:
1206
+ if trace > 2:
1207
+ ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
1208
+ acc = cutoffchecker.acc or accuracy(classifier, train_toks)
1209
+ iternum = cutoffchecker.iter
1210
+ print(" %9d %14.5f %9.3f" % (iternum, ll, acc))
1211
+
1212
+ # Calculate the deltas for this iteration, using Newton's method.
1213
+ deltas = calculate_deltas(
1214
+ train_toks,
1215
+ classifier,
1216
+ unattested,
1217
+ empirical_ffreq,
1218
+ nfmap,
1219
+ nfarray,
1220
+ nftranspose,
1221
+ encoding,
1222
+ )
1223
+
1224
+ # Use the deltas to update our weights.
1225
+ weights = classifier.weights()
1226
+ weights += deltas
1227
+ classifier.set_weights(weights)
1228
+
1229
+ # Check the log-likelihood & accuracy cutoffs.
1230
+ if cutoffchecker.check(classifier, train_toks):
1231
+ break
1232
+
1233
+ except KeyboardInterrupt:
1234
+ print(" Training stopped: keyboard interrupt")
1235
+ except:
1236
+ raise
1237
+
1238
+ if trace > 2:
1239
+ ll = log_likelihood(classifier, train_toks)
1240
+ acc = accuracy(classifier, train_toks)
1241
+ print(f" Final {ll:14.5f} {acc:9.3f}")
1242
+
1243
+ # Return the classifier.
1244
+ return classifier
1245
+
1246
+
1247
+ def calculate_nfmap(train_toks, encoding):
1248
+ """
1249
+ Construct a map that can be used to compress ``nf`` (which is
1250
+ typically sparse).
1251
+
1252
+ *nf(feature_vector)* is the sum of the feature values for
1253
+ *feature_vector*.
1254
+
1255
+ This represents the number of features that are active for a
1256
+ given labeled text. This method finds all values of *nf(t)*
1257
+ that are attested for at least one token in the given list of
1258
+ training tokens; and constructs a dictionary mapping these
1259
+ attested values to a continuous range *0...N*. For example,
1260
+ if the only values of *nf()* that were attested were 3, 5, and
1261
+ 7, then ``_nfmap`` might return the dictionary ``{3:0, 5:1, 7:2}``.
1262
+
1263
+ :return: A map that can be used to compress ``nf`` to a dense
1264
+ vector.
1265
+ :rtype: dict(int -> int)
1266
+ """
1267
+ # Map from nf to indices. This allows us to use smaller arrays.
1268
+ nfset = set()
1269
+ for tok, _ in train_toks:
1270
+ for label in encoding.labels():
1271
+ nfset.add(sum(val for (id, val) in encoding.encode(tok, label)))
1272
+ return {nf: i for (i, nf) in enumerate(nfset)}
1273
+
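A tiny sketch of the compression this function performs, using invented nf values.

attested_nf_values = {3, 5, 7}   # sums of feature values seen over (tok, label) pairs
nfmap = {nf: i for (i, nf) in enumerate(attested_nf_values)}
print(nfmap)                     # e.g. {3: 0, 5: 1, 7: 2} (iteration order of a set may vary)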
1274
+
1275
+ def calculate_deltas(
1276
+ train_toks,
1277
+ classifier,
1278
+ unattested,
1279
+ ffreq_empirical,
1280
+ nfmap,
1281
+ nfarray,
1282
+ nftranspose,
1283
+ encoding,
1284
+ ):
1285
+ r"""
1286
+ Calculate the update values for the classifier weights for
1287
+ this iteration of IIS. These update weights are the value of
1288
+ ``delta`` that solves the equation::
1289
+
1290
+ ffreq_empirical[i]
1291
+ =
1292
+ SUM[fs,l] (classifier.prob_classify(fs).prob(l) *
1293
+ feature_vector(fs,l)[i] *
1294
+ exp(delta[i] * nf(feature_vector(fs,l))))
1295
+
1296
+ Where:
1297
+ - *(fs,l)* is a (featureset, label) tuple from ``train_toks``
1298
+ - *feature_vector(fs,l)* = ``encoding.encode(fs,l)``
1299
+ - *nf(vector)* = ``sum([val for (id,val) in vector])``
1300
+
1301
+ This method uses Newton's method to solve this equation for
1302
+ *delta[i]*. In particular, it starts with a guess of
1303
+ ``delta[i]`` = 1; and iteratively updates ``delta`` with:
1304
+
1305
+ | delta[i] -= (ffreq_empirical[i] - sum1[i])/(-sum2[i])
1306
+
1307
+ until convergence, where *sum1* and *sum2* are defined as:
1308
+
1309
+ | sum1[i](delta) = SUM[fs,l] f[i](fs,l,delta)
1310
+ | sum2[i](delta) = SUM[fs,l] (f[i](fs,l,delta).nf(feature_vector(fs,l)))
1311
+ | f[i](fs,l,delta) = (classifier.prob_classify(fs).prob(l) .
1312
+ | feature_vector(fs,l)[i] .
1313
+ | exp(delta[i] . nf(feature_vector(fs,l))))
1314
+
1315
+ Note that *sum1* and *sum2* depend on ``delta``; so they need
1316
+ to be re-computed each iteration.
1317
+
1318
+ The variables ``nfmap``, ``nfarray``, and ``nftranspose`` are
1319
+ used to generate a dense encoding for *nf(ltext)*. This
1320
+ allows ``_deltas`` to calculate *sum1* and *sum2* using
1321
+ matrices, which yields a significant performance improvement.
1322
+
1323
+ :param train_toks: The set of training tokens.
1324
+ :type train_toks: list(tuple(dict, str))
1325
+ :param classifier: The current classifier.
1326
+ :type classifier: ClassifierI
1327
+ :param ffreq_empirical: An array containing the empirical
1328
+ frequency for each feature. The *i*\ th element of this
1329
+ array is the empirical frequency for feature *i*.
1330
+ :type ffreq_empirical: sequence of float
1331
+ :param unattested: An array that is 1 for features that are
1332
+ not attested in the training data; and 0 for features that
1333
+ are attested. In other words, ``unattested[i]==1`` iff
1334
+ ``ffreq_empirical[i]==0``.
1335
+ :type unattested: sequence of int
1336
+ :param nfmap: A map that can be used to compress ``nf`` to a dense
1337
+ vector.
1338
+ :type nfmap: dict(int -> int)
1339
+ :param nfarray: An array that can be used to uncompress ``nf``
1340
+ from a dense vector.
1341
+ :type nfarray: array(float)
1342
+ :param nftranspose: The transpose of ``nfarray``
1343
+ :type nftranspose: array(float)
1344
+ """
1345
+ # These parameters control when we decide that we've
1346
+ # converged. It probably should be possible to set these
1347
+ # manually, via keyword arguments to train.
1348
+ NEWTON_CONVERGE = 1e-12
1349
+ MAX_NEWTON = 300
1350
+
1351
+ deltas = numpy.ones(encoding.length(), "d")
1352
+
1353
+ # Precompute the A matrix:
1354
+ # A[nf][id] = sum ( p(fs) * p(label|fs) * f(fs,label) )
1355
+ # over all label,fs s.t. num_features[label,fs]=nf
1356
+ A = numpy.zeros((len(nfmap), encoding.length()), "d")
1357
+
1358
+ for tok, label in train_toks:
1359
+ dist = classifier.prob_classify(tok)
1360
+
1361
+ for label in encoding.labels():
1362
+ # Generate the feature vector
1363
+ feature_vector = encoding.encode(tok, label)
1364
+ # Find the number of active features
1365
+ nf = sum(val for (id, val) in feature_vector)
1366
+ # Update the A matrix
1367
+ for (id, val) in feature_vector:
1368
+ A[nfmap[nf], id] += dist.prob(label) * val
1369
+ A /= len(train_toks)
1370
+
1371
+ # Iteratively solve for delta. Use the following variables:
1372
+ # - nf_delta[x][y] = nfarray[x] * delta[y]
1373
+ # - exp_nf_delta[x][y] = exp(nf[x] * delta[y])
1374
+ # - nf_exp_nf_delta[x][y] = nf[x] * exp(nf[x] * delta[y])
1375
+ # - sum1[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
1376
+ # exp(delta[i]nf)
1377
+ # - sum2[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
1378
+ # nf exp(delta[i]nf)
1379
+ for rangenum in range(MAX_NEWTON):
1380
+ nf_delta = numpy.outer(nfarray, deltas)
1381
+ exp_nf_delta = 2**nf_delta
1382
+ nf_exp_nf_delta = nftranspose * exp_nf_delta
1383
+ sum1 = numpy.sum(exp_nf_delta * A, axis=0)
1384
+ sum2 = numpy.sum(nf_exp_nf_delta * A, axis=0)
1385
+
1386
+ # Avoid division by zero.
1387
+ for fid in unattested:
1388
+ sum2[fid] += 1
1389
+
1390
+ # Update the deltas.
1391
+ deltas -= (ffreq_empirical - sum1) / -sum2
1392
+
1393
+ # We can stop once we converge.
1394
+ n_error = numpy.sum(abs(ffreq_empirical - sum1)) / numpy.sum(abs(deltas))
1395
+ if n_error < NEWTON_CONVERGE:
1396
+ return deltas
1397
+
1398
+ return deltas
1399
+
1400
+
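A scalar analog (illustrative only, with made-up numbers) of the Newton iteration above: solve empirical = SUM[nf] A[nf] * 2**(nf*delta) for a single feature's delta.

A = {1: 0.20, 3: 0.10}     # hypothetical A[nf] entries for one feature id
empirical = 0.45

delta = 1.0
for _ in range(300):
    sum1 = sum(a * 2 ** (nf * delta) for nf, a in A.items())
    sum2 = sum(a * nf * 2 ** (nf * delta) for nf, a in A.items())
    delta -= (empirical - sum1) / -sum2
    if abs(empirical - sum1) < 1e-12:
        break
print(delta)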
1401
+ ######################################################################
1402
+ # { Classifier Trainer: megam
1403
+ ######################################################################
1404
+
1405
+ # [xx] possible extension: add support for using implicit file format;
1406
+ # this would need to put requirements on what encoding is used. But
1407
+ # we may need this for other maxent classifier trainers that require
1408
+ # implicit formats anyway.
1409
+ def train_maxent_classifier_with_megam(
1410
+ train_toks, trace=3, encoding=None, labels=None, gaussian_prior_sigma=0, **kwargs
1411
+ ):
1412
+ """
1413
+ Train a new ``ConditionalExponentialClassifier``, using the given
1414
+ training samples, using the external ``megam`` library. This
1415
+ ``ConditionalExponentialClassifier`` will encode the model that
1416
+ maximizes entropy from all the models that are empirically
1417
+ consistent with ``train_toks``.
1418
+
1419
+ :see: ``train_maxent_classifier()`` for parameter descriptions.
1420
+ :see: ``nltk.classify.megam``
1421
+ """
1422
+
1423
+ explicit = True
1424
+ bernoulli = True
1425
+ if "explicit" in kwargs:
1426
+ explicit = kwargs["explicit"]
1427
+ if "bernoulli" in kwargs:
1428
+ bernoulli = kwargs["bernoulli"]
1429
+
1430
+ # Construct an encoding from the training data.
1431
+ if encoding is None:
1432
+ # Count cutoff can also be controlled by megam with the -minfc
1433
+ # option. Not sure where the best place for it is.
1434
+ count_cutoff = kwargs.get("count_cutoff", 0)
1435
+ encoding = BinaryMaxentFeatureEncoding.train(
1436
+ train_toks, count_cutoff, labels=labels, alwayson_features=True
1437
+ )
1438
+ elif labels is not None:
1439
+ raise ValueError("Specify encoding or labels, not both")
1440
+
1441
+ # Write a training file for megam.
1442
+ try:
1443
+ fd, trainfile_name = tempfile.mkstemp(prefix="nltk-")
1444
+ with open(trainfile_name, "w") as trainfile:
1445
+ write_megam_file(
1446
+ train_toks, encoding, trainfile, explicit=explicit, bernoulli=bernoulli
1447
+ )
1448
+ os.close(fd)
1449
+ except (OSError, ValueError) as e:
1450
+ raise ValueError("Error while creating megam training file: %s" % e) from e
1451
+
1452
+ # Run megam on the training file.
1453
+ options = []
1454
+ options += ["-nobias", "-repeat", "10"]
1455
+ if explicit:
1456
+ options += ["-explicit"]
1457
+ if not bernoulli:
1458
+ options += ["-fvals"]
1459
+ if gaussian_prior_sigma:
1460
+ # Lambda is just the precision of the Gaussian prior, i.e. it's the
1461
+ # inverse variance, so the parameter conversion is 1.0/sigma**2.
1462
+ # See https://users.umiacs.umd.edu/~hal/docs/daume04cg-bfgs.pdf
1463
+ inv_variance = 1.0 / gaussian_prior_sigma**2
1464
+ else:
1465
+ inv_variance = 0
1466
+ options += ["-lambda", "%.2f" % inv_variance, "-tune"]
1467
+ if trace < 3:
1468
+ options += ["-quiet"]
1469
+ if "max_iter" in kwargs:
1470
+ options += ["-maxi", "%s" % kwargs["max_iter"]]
1471
+ if "ll_delta" in kwargs:
1472
+ # [xx] this is actually a perplexity delta, not a log
1473
+ # likelihood delta
1474
+ options += ["-dpp", "%s" % abs(kwargs["ll_delta"])]
1475
+ if hasattr(encoding, "cost"):
1476
+ options += ["-multilabel"] # each possible la
1477
+ options += ["multiclass", trainfile_name]
1478
+ stdout = call_megam(options)
1479
+ # print('./megam_i686.opt ', ' '.join(options))
1480
+ # Delete the training file
1481
+ try:
1482
+ os.remove(trainfile_name)
1483
+ except OSError as e:
1484
+ print(f"Warning: unable to delete {trainfile_name}: {e}")
1485
+
1486
+ # Parse the generated weight vector.
1487
+ weights = parse_megam_weights(stdout, encoding.length(), explicit)
1488
+
1489
+ # Convert from base-e to base-2 weights.
1490
+ weights *= numpy.log2(numpy.e)
1491
+
1492
+ # Build the classifier
1493
+ return MaxentClassifier(encoding, weights)
1494
+
1495
+
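A sketch of reaching this trainer through the public API; it assumes the external megam binary is installed and findable, and uses invented toy data.

from nltk.classify import MaxentClassifier
from nltk.classify.megam import config_megam

train_toks = [
    ({"last_letter": "a"}, "female"),
    ({"last_letter": "k"}, "male"),
]
config_megam()   # or config_megam("/path/to/megam") if it is not on PATH
classifier = MaxentClassifier.train(train_toks, algorithm="megam", trace=0)
print(classifier.classify({"last_letter": "a"}))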
1496
+ ######################################################################
1497
+ # { Classifier Trainer: tadm
1498
+ ######################################################################
1499
+
1500
+
1501
+ class TadmMaxentClassifier(MaxentClassifier):
1502
+ @classmethod
1503
+ def train(cls, train_toks, **kwargs):
1504
+ algorithm = kwargs.get("algorithm", "tao_lmvm")
1505
+ trace = kwargs.get("trace", 3)
1506
+ encoding = kwargs.get("encoding", None)
1507
+ labels = kwargs.get("labels", None)
1508
+ sigma = kwargs.get("gaussian_prior_sigma", 0)
1509
+ count_cutoff = kwargs.get("count_cutoff", 0)
1510
+ max_iter = kwargs.get("max_iter")
1511
+ ll_delta = kwargs.get("min_lldelta")
1512
+
1513
+ # Construct an encoding from the training data.
1514
+ if not encoding:
1515
+ encoding = TadmEventMaxentFeatureEncoding.train(
1516
+ train_toks, count_cutoff, labels=labels
1517
+ )
1518
+
1519
+ trainfile_fd, trainfile_name = tempfile.mkstemp(
1520
+ prefix="nltk-tadm-events-", suffix=".gz"
1521
+ )
1522
+ weightfile_fd, weightfile_name = tempfile.mkstemp(prefix="nltk-tadm-weights-")
1523
+
1524
+ trainfile = gzip_open_unicode(trainfile_name, "w")
1525
+ write_tadm_file(train_toks, encoding, trainfile)
1526
+ trainfile.close()
1527
+
1528
+ options = []
1529
+ options.extend(["-monitor"])
1530
+ options.extend(["-method", algorithm])
1531
+ if sigma:
1532
+ options.extend(["-l2", "%.6f" % sigma**2])
1533
+ if max_iter:
1534
+ options.extend(["-max_it", "%d" % max_iter])
1535
+ if ll_delta:
1536
+ options.extend(["-fatol", "%.6f" % abs(ll_delta)])
1537
+ options.extend(["-events_in", trainfile_name])
1538
+ options.extend(["-params_out", weightfile_name])
1539
+ if trace < 3:
1540
+ options.extend(["2>&1"])
1541
+ else:
1542
+ options.extend(["-summary"])
1543
+
1544
+ call_tadm(options)
1545
+
1546
+ with open(weightfile_name) as weightfile:
1547
+ weights = parse_tadm_weights(weightfile)
1548
+
1549
+ os.remove(trainfile_name)
1550
+ os.remove(weightfile_name)
1551
+
1552
+ # Convert from base-e to base-2 weights.
1553
+ weights *= numpy.log2(numpy.e)
1554
+
1555
+ # Build the classifier
1556
+ return cls(encoding, weights)
1557
+
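A small check (illustrative, made-up weights) of the base conversion applied just above: tadm and megam optimise natural-log weights, while this module scores with powers of two, and multiplying by log2(e) preserves the scores.

import numpy

w_base_e = numpy.array([0.7, -1.2])
w_base_2 = w_base_e * numpy.log2(numpy.e)

f = numpy.array([1.0, 2.0])   # made-up feature values
print(numpy.exp(w_base_e @ f), 2.0 ** (w_base_2 @ f))   # identical scores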
1558
+
1559
+ ######################################################################
1560
+ # { Demo
1561
+ ######################################################################
1562
+ def demo():
1563
+ from nltk.classify.util import names_demo
1564
+
1565
+ classifier = names_demo(MaxentClassifier.train)
1566
+
1567
+
1568
+ if __name__ == "__main__":
1569
+ demo()
lib/python3.10/site-packages/nltk/classify/megam.py ADDED
@@ -0,0 +1,184 @@
1
+ # Natural Language Toolkit: Interface to Megam Classifier
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A set of functions used to interface with the external megam_ maxent
10
+ optimization package. Before megam can be used, you should tell NLTK where it
11
+ can find the megam binary, using the ``config_megam()`` function. Typical
12
+ usage:
13
+
14
+ >>> from nltk.classify import megam
15
+ >>> megam.config_megam() # pass path to megam if not found in PATH # doctest: +SKIP
16
+ [Found megam: ...]
17
+
18
+ Use with MaxentClassifier. Example below, see MaxentClassifier documentation
19
+ for details.
20
+
21
+ nltk.classify.MaxentClassifier.train(corpus, 'megam')
22
+
23
+ .. _megam: https://www.umiacs.umd.edu/~hal/megam/index.html
24
+ """
25
+ import subprocess
26
+
27
+ from nltk.internals import find_binary
28
+
29
+ try:
30
+ import numpy
31
+ except ImportError:
32
+ numpy = None
33
+
34
+ ######################################################################
35
+ # { Configuration
36
+ ######################################################################
37
+
38
+ _megam_bin = None
39
+
40
+
41
+ def config_megam(bin=None):
42
+ """
43
+ Configure NLTK's interface to the ``megam`` maxent optimization
44
+ package.
45
+
46
+ :param bin: The full path to the ``megam`` binary. If not specified,
47
+ then nltk will search the system for a ``megam`` binary; and if
48
+ one is not found, it will raise a ``LookupError`` exception.
49
+ :type bin: str
50
+ """
51
+ global _megam_bin
52
+ _megam_bin = find_binary(
53
+ "megam",
54
+ bin,
55
+ env_vars=["MEGAM"],
56
+ binary_names=["megam.opt", "megam", "megam_686", "megam_i686.opt"],
57
+ url="https://www.umiacs.umd.edu/~hal/megam/index.html",
58
+ )
59
+
60
+
61
+ ######################################################################
62
+ # { Megam Interface Functions
63
+ ######################################################################
64
+
65
+
66
+ def write_megam_file(train_toks, encoding, stream, bernoulli=True, explicit=True):
67
+ """
68
+ Generate an input file for ``megam`` based on the given corpus of
69
+ classified tokens.
70
+
71
+ :type train_toks: list(tuple(dict, str))
72
+ :param train_toks: Training data, represented as a list of
73
+ pairs, the first member of which is a feature dictionary,
74
+ and the second of which is a classification label.
75
+
76
+ :type encoding: MaxentFeatureEncodingI
77
+ :param encoding: A feature encoding, used to convert featuresets
78
+ into feature vectors. May optionally implement a cost() method
79
+ in order to assign different costs to different class predictions.
80
+
81
+ :type stream: stream
82
+ :param stream: The stream to which the megam input file should be
83
+ written.
84
+
85
+ :param bernoulli: If true, then use the 'bernoulli' format. I.e.,
86
+ all joint features have binary values, and are listed iff they
87
+ are true. Otherwise, list feature values explicitly. If
88
+ ``bernoulli=False``, then you must call ``megam`` with the
89
+ ``-fvals`` option.
90
+
91
+ :param explicit: If true, then use the 'explicit' format. I.e.,
92
+ list the features that would fire for any of the possible
93
+ labels, for each token. If ``explicit=True``, then you must
94
+ call ``megam`` with the ``-explicit`` option.
95
+ """
96
+ # Look up the set of labels.
97
+ labels = encoding.labels()
98
+ labelnum = {label: i for (i, label) in enumerate(labels)}
99
+
100
+ # Write the file, which contains one line per instance.
101
+ for featureset, label in train_toks:
102
+ # First, the instance number (or, in the weighted multiclass case, the cost of each label).
103
+ if hasattr(encoding, "cost"):
104
+ stream.write(
105
+ ":".join(str(encoding.cost(featureset, label, l)) for l in labels)
106
+ )
107
+ else:
108
+ stream.write("%d" % labelnum[label])
109
+
110
+ # For implicit file formats, just list the features that fire
111
+ # for this instance's actual label.
112
+ if not explicit:
113
+ _write_megam_features(encoding.encode(featureset, label), stream, bernoulli)
114
+
115
+ # For explicit formats, list the features that would fire for
116
+ # any of the possible labels.
117
+ else:
118
+ for l in labels:
119
+ stream.write(" #")
120
+ _write_megam_features(encoding.encode(featureset, l), stream, bernoulli)
121
+
122
+ # End of the instance.
123
+ stream.write("\n")
124
+
125
+
126
+ def parse_megam_weights(s, features_count, explicit=True):
127
+ """
128
+ Given the stdout output generated by ``megam`` when training a
129
+ model, return a ``numpy`` array containing the corresponding weight
130
+ vector. This function does not currently handle bias features.
131
+ """
132
+ if numpy is None:
133
+ raise ValueError("This function requires that numpy be installed")
134
+ assert explicit, "non-explicit not supported yet"
135
+ lines = s.strip().split("\n")
136
+ weights = numpy.zeros(features_count, "d")
137
+ for line in lines:
138
+ if line.strip():
139
+ fid, weight = line.split()
140
+ weights[int(fid)] = float(weight)
141
+ return weights
142
+
143
+
144
+ def _write_megam_features(vector, stream, bernoulli):
145
+ if not vector:
146
+ raise ValueError(
147
+ "MEGAM classifier requires the use of an " "always-on feature."
148
+ )
149
+ for (fid, fval) in vector:
150
+ if bernoulli:
151
+ if fval == 1:
152
+ stream.write(" %s" % fid)
153
+ elif fval != 0:
154
+ raise ValueError(
155
+ "If bernoulli=True, then all" "features must be binary."
156
+ )
157
+ else:
158
+ stream.write(f" {fid} {fval}")
159
+
160
+
161
+ def call_megam(args):
162
+ """
163
+ Call the ``megam`` binary with the given arguments.
164
+ """
165
+ if isinstance(args, str):
166
+ raise TypeError("args should be a list of strings")
167
+ if _megam_bin is None:
168
+ config_megam()
169
+
170
+ # Call megam via a subprocess
171
+ cmd = [_megam_bin] + args
172
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
173
+ (stdout, stderr) = p.communicate()
174
+
175
+ # Check the return code.
176
+ if p.returncode != 0:
177
+ print()
178
+ print(stderr)
179
+ raise OSError("megam command failed!")
180
+
181
+ if isinstance(stdout, str):
182
+ return stdout
183
+ else:
184
+ return stdout.decode("utf-8")
lib/python3.10/site-packages/nltk/classify/naivebayes.py ADDED
@@ -0,0 +1,260 @@
1
+ # Natural Language Toolkit: Naive Bayes Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A classifier based on the Naive Bayes algorithm. In order to find the
10
+ probability for a label, this algorithm first uses the Bayes rule to
11
+ express P(label|features) in terms of P(label) and P(features|label):
12
+
13
+ | P(label) * P(features|label)
14
+ | P(label|features) = ------------------------------
15
+ | P(features)
16
+
17
+ The algorithm then makes the 'naive' assumption that all features are
18
+ independent, given the label:
19
+
20
+ | P(label) * P(f1|label) * ... * P(fn|label)
21
+ | P(label|features) = --------------------------------------------
22
+ | P(features)
23
+
24
+ Rather than computing P(features) explicitly, the algorithm just
25
+ calculates the numerator for each label, and normalizes them so they
26
+ sum to one:
27
+
28
+ | P(label) * P(f1|label) * ... * P(fn|label)
29
+ | P(label|features) = --------------------------------------------
30
+ | SUM[l]( P(l) * P(f1|l) * ... * P(fn|l) )
31
+ """
32
+
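A worked numeric example of the normalisation above, using made-up probabilities for two labels and two features.

p_label = {"spam": 0.4, "ham": 0.6}
p_feat = {
    ("spam", "f1"): 0.8, ("spam", "f2"): 0.3,
    ("ham", "f1"): 0.2, ("ham", "f2"): 0.7,
}

numerators = {
    label: p_label[label] * p_feat[label, "f1"] * p_feat[label, "f2"]
    for label in p_label
}
total = sum(numerators.values())
print({label: num / total for label, num in numerators.items()})
# {'spam': 0.533..., 'ham': 0.466...}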
33
+ from collections import defaultdict
34
+
35
+ from nltk.classify.api import ClassifierI
36
+ from nltk.probability import DictionaryProbDist, ELEProbDist, FreqDist, sum_logs
37
+
38
+ ##//////////////////////////////////////////////////////
39
+ ## Naive Bayes Classifier
40
+ ##//////////////////////////////////////////////////////
41
+
42
+
43
+ class NaiveBayesClassifier(ClassifierI):
44
+ """
45
+ A Naive Bayes classifier. Naive Bayes classifiers are
46
+ parameterized by two probability distributions:
47
+
48
+ - P(label) gives the probability that an input will receive each
49
+ label, given no information about the input's features.
50
+
51
+ - P(fname=fval|label) gives the probability that a given feature
52
+ (fname) will receive a given value (fval), given the
53
+ label (label).
54
+
55
+ If the classifier encounters an input with a feature that has
56
+ never been seen with any label, then rather than assigning a
57
+ probability of 0 to all labels, it will ignore that feature.
58
+
59
+ The feature value 'None' is reserved for unseen feature values;
60
+ you generally should not use 'None' as a feature value for one of
61
+ your own features.
62
+ """
63
+
64
+ def __init__(self, label_probdist, feature_probdist):
65
+ """
66
+ :param label_probdist: P(label), the probability distribution
67
+ over labels. It is expressed as a ``ProbDistI`` whose
68
+ samples are labels. I.e., P(label) =
69
+ ``label_probdist.prob(label)``.
70
+
71
+ :param feature_probdist: P(fname=fval|label), the probability
72
+ distribution for feature values, given labels. It is
73
+ expressed as a dictionary whose keys are ``(label, fname)``
74
+ pairs and whose values are ``ProbDistI`` objects over feature
75
+ values. I.e., P(fname=fval|label) =
76
+ ``feature_probdist[label,fname].prob(fval)``. If a given
77
+ ``(label,fname)`` is not a key in ``feature_probdist``, then
78
+ it is assumed that the corresponding P(fname=fval|label)
79
+ is 0 for all values of ``fval``.
80
+ """
81
+ self._label_probdist = label_probdist
82
+ self._feature_probdist = feature_probdist
83
+ self._labels = list(label_probdist.samples())
84
+
85
+ def labels(self):
86
+ return self._labels
87
+
88
+ def classify(self, featureset):
89
+ return self.prob_classify(featureset).max()
90
+
91
+ def prob_classify(self, featureset):
92
+ # Discard any feature names that we've never seen before.
93
+ # Otherwise, we'll just assign a probability of 0 to
94
+ # everything.
95
+ featureset = featureset.copy()
96
+ for fname in list(featureset.keys()):
97
+ for label in self._labels:
98
+ if (label, fname) in self._feature_probdist:
99
+ break
100
+ else:
101
+ # print('Ignoring unseen feature %s' % fname)
102
+ del featureset[fname]
103
+
104
+ # Find the log probability of each label, given the features.
105
+ # Start with the log probability of the label itself.
106
+ logprob = {}
107
+ for label in self._labels:
108
+ logprob[label] = self._label_probdist.logprob(label)
109
+
110
+ # Then add in the log probability of features given labels.
111
+ for label in self._labels:
112
+ for (fname, fval) in featureset.items():
113
+ if (label, fname) in self._feature_probdist:
114
+ feature_probs = self._feature_probdist[label, fname]
115
+ logprob[label] += feature_probs.logprob(fval)
116
+ else:
117
+ # nb: This case will never come up if the
118
+ # classifier was created by
119
+ # NaiveBayesClassifier.train().
120
+ logprob[label] += sum_logs([]) # = -INF.
121
+
122
+ return DictionaryProbDist(logprob, normalize=True, log=True)
123
+
124
+ def show_most_informative_features(self, n=10):
125
+ # Determine the most relevant features, and display them.
126
+ cpdist = self._feature_probdist
127
+ print("Most Informative Features")
128
+
129
+ for (fname, fval) in self.most_informative_features(n):
130
+
131
+ def labelprob(l):
132
+ return cpdist[l, fname].prob(fval)
133
+
134
+ labels = sorted(
135
+ (l for l in self._labels if fval in cpdist[l, fname].samples()),
136
+ key=lambda element: (-labelprob(element), element),
137
+ reverse=True,
138
+ )
139
+ if len(labels) == 1:
140
+ continue
141
+ l0 = labels[0]
142
+ l1 = labels[-1]
143
+ if cpdist[l0, fname].prob(fval) == 0:
144
+ ratio = "INF"
145
+ else:
146
+ ratio = "%8.1f" % (
147
+ cpdist[l1, fname].prob(fval) / cpdist[l0, fname].prob(fval)
148
+ )
149
+ print(
150
+ "%24s = %-14r %6s : %-6s = %s : 1.0"
151
+ % (fname, fval, ("%s" % l1)[:6], ("%s" % l0)[:6], ratio)
152
+ )
153
+
154
+ def most_informative_features(self, n=100):
155
+ """
156
+ Return a list of the 'most informative' features used by this
157
+ classifier. For the purpose of this function, the
158
+ informativeness of a feature ``(fname,fval)`` is equal to the
159
+ highest value of P(fname=fval|label), for any label, divided by
160
+ the lowest value of P(fname=fval|label), for any label:
161
+
162
+ | max[ P(fname=fval|label1) / P(fname=fval|label2) ]
163
+ """
164
+ if hasattr(self, "_most_informative_features"):
165
+ return self._most_informative_features[:n]
166
+ else:
167
+ # The set of (fname, fval) pairs used by this classifier.
168
+ features = set()
169
+ # The max & min probability associated w/ each (fname, fval)
170
+ # pair. Maps (fname,fval) -> float.
171
+ maxprob = defaultdict(lambda: 0.0)
172
+ minprob = defaultdict(lambda: 1.0)
173
+
174
+ for (label, fname), probdist in self._feature_probdist.items():
175
+ for fval in probdist.samples():
176
+ feature = (fname, fval)
177
+ features.add(feature)
178
+ p = probdist.prob(fval)
179
+ maxprob[feature] = max(p, maxprob[feature])
180
+ minprob[feature] = min(p, minprob[feature])
181
+ if minprob[feature] == 0:
182
+ features.discard(feature)
183
+
184
+ # Convert features to a list, & sort it by how informative
185
+ # features are.
186
+ self._most_informative_features = sorted(
187
+ features,
188
+ key=lambda feature_: (
189
+ minprob[feature_] / maxprob[feature_],
190
+ feature_[0],
191
+ feature_[1] in [None, False, True],
192
+ str(feature_[1]).lower(),
193
+ ),
194
+ )
195
+ return self._most_informative_features[:n]
196
+
197
+ @classmethod
198
+ def train(cls, labeled_featuresets, estimator=ELEProbDist):
199
+ """
200
+ :param labeled_featuresets: A list of classified featuresets,
201
+ i.e., a list of tuples ``(featureset, label)``.
202
+ """
203
+ label_freqdist = FreqDist()
204
+ feature_freqdist = defaultdict(FreqDist)
205
+ feature_values = defaultdict(set)
206
+ fnames = set()
207
+
208
+ # Count up how many times each feature value occurred, given
209
+ # the label and featurename.
210
+ for featureset, label in labeled_featuresets:
211
+ label_freqdist[label] += 1
212
+ for fname, fval in featureset.items():
213
+ # Increment freq(fval|label, fname)
214
+ feature_freqdist[label, fname][fval] += 1
215
+ # Record that fname can take the value fval.
216
+ feature_values[fname].add(fval)
217
+ # Keep a list of all feature names.
218
+ fnames.add(fname)
219
+
220
+ # If a feature didn't have a value given for an instance, then
221
+ # we assume that it gets the implicit value 'None.' This loop
222
+ # counts up the number of 'missing' feature values for each
223
+ # (label,fname) pair, and increments the count of the fval
224
+ # 'None' by that amount.
225
+ for label in label_freqdist:
226
+ num_samples = label_freqdist[label]
227
+ for fname in fnames:
228
+ count = feature_freqdist[label, fname].N()
229
+ # Only add a None key when necessary, i.e. if there are
230
+ # any samples with feature 'fname' missing.
231
+ if num_samples - count > 0:
232
+ feature_freqdist[label, fname][None] += num_samples - count
233
+ feature_values[fname].add(None)
234
+
235
+ # Create the P(label) distribution
236
+ label_probdist = estimator(label_freqdist)
237
+
238
+ # Create the P(fval|label, fname) distribution
239
+ feature_probdist = {}
240
+ for ((label, fname), freqdist) in feature_freqdist.items():
241
+ probdist = estimator(freqdist, bins=len(feature_values[fname]))
242
+ feature_probdist[label, fname] = probdist
243
+
244
+ return cls(label_probdist, feature_probdist)
245
+
246
+
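A minimal usage sketch with invented toy data, assuming nltk is installed.

from nltk.classify import NaiveBayesClassifier

train = [
    ({"last_letter": "a"}, "female"),
    ({"last_letter": "k"}, "male"),
    ({"last_letter": "a"}, "female"),
]
classifier = NaiveBayesClassifier.train(train)
print(classifier.classify({"last_letter": "a"}))        # 'female'

dist = classifier.prob_classify({"last_letter": "k"})
print({label: round(dist.prob(label), 3) for label in dist.samples()})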
247
+ ##//////////////////////////////////////////////////////
248
+ ## Demo
249
+ ##//////////////////////////////////////////////////////
250
+
251
+
252
+ def demo():
253
+ from nltk.classify.util import names_demo
254
+
255
+ classifier = names_demo(NaiveBayesClassifier.train)
256
+ classifier.show_most_informative_features()
257
+
258
+
259
+ if __name__ == "__main__":
260
+ demo()
lib/python3.10/site-packages/nltk/classify/textcat.py ADDED
@@ -0,0 +1,197 @@
1
+ # Natural Language Toolkit: Language ID module using TextCat algorithm
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Avital Pekker <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A module for language identification using the TextCat algorithm.
11
+ An implementation of the text categorization algorithm
12
+ presented in Cavnar, W. B. and J. M. Trenkle,
13
+ "N-Gram-Based Text Categorization".
14
+
15
+ The algorithm takes advantage of Zipf's law and uses
16
+ n-gram frequencies to profile languages and text-yet to
17
+ be identified-then compares using a distance measure.
18
+
19
+ Language n-grams are provided by the "An Crubadan"
20
+ project. A corpus reader was created separately to read
21
+ those files.
22
+
23
+ For details regarding the algorithm, see:
24
+ https://www.let.rug.nl/~vannoord/TextCat/textcat.pdf
25
+
26
+ For details about An Crubadan, see:
27
+ https://borel.slu.edu/crubadan/index.html
28
+ """
29
+
30
+ from sys import maxsize
31
+
32
+ from nltk.util import trigrams
33
+
34
+ # Note: this is NOT "re" you're likely used to. The regex module
35
+ # is an alternative to the standard re module that supports
36
+ # Unicode codepoint properties with the \p{} syntax.
37
+ # You may have to "pip install regex"
38
+ try:
39
+ import regex as re
40
+ except ImportError:
41
+ re = None
42
+ ######################################################################
43
+ ## Language identification using TextCat
44
+ ######################################################################
45
+
46
+
47
+ class TextCat:
48
+
49
+ _corpus = None
50
+ fingerprints = {}
51
+ _START_CHAR = "<"
52
+ _END_CHAR = ">"
53
+
54
+ last_distances = {}
55
+
56
+ def __init__(self):
57
+ if not re:
58
+ raise OSError(
59
+ "classify.textcat requires the regex module that "
60
+ "supports unicode. Try '$ pip install regex' and "
61
+ "see https://pypi.python.org/pypi/regex for "
62
+ "further details."
63
+ )
64
+
65
+ from nltk.corpus import crubadan
66
+
67
+ self._corpus = crubadan
68
+ # Load all language ngrams into cache
69
+ for lang in self._corpus.langs():
70
+ self._corpus.lang_freq(lang)
71
+
72
+ def remove_punctuation(self, text):
73
+ """Get rid of punctuation except apostrophes"""
74
+ return re.sub(r"[^\P{P}\']+", "", text)
75
+
76
+ def profile(self, text):
77
+ """Create FreqDist of trigrams within text"""
78
+ from nltk import FreqDist, word_tokenize
79
+
80
+ clean_text = self.remove_punctuation(text)
81
+ tokens = word_tokenize(clean_text)
82
+
83
+ fingerprint = FreqDist()
84
+ for t in tokens:
85
+ token_trigram_tuples = trigrams(self._START_CHAR + t + self._END_CHAR)
86
+ token_trigrams = ["".join(tri) for tri in token_trigram_tuples]
87
+
88
+ for cur_trigram in token_trigrams:
89
+ if cur_trigram in fingerprint:
90
+ fingerprint[cur_trigram] += 1
91
+ else:
92
+ fingerprint[cur_trigram] = 1
93
+
94
+ return fingerprint
95
+
96
+ def calc_dist(self, lang, trigram, text_profile):
97
+ """Calculate the "out-of-place" measure between the
98
+ text and language profile for a single trigram"""
99
+
100
+ lang_fd = self._corpus.lang_freq(lang)
101
+ dist = 0
102
+
103
+ if trigram in lang_fd:
104
+ idx_lang_profile = list(lang_fd.keys()).index(trigram)
105
+ idx_text = list(text_profile.keys()).index(trigram)
106
+
107
+ # print(idx_lang_profile, ", ", idx_text)
108
+ dist = abs(idx_lang_profile - idx_text)
109
+ else:
110
+ # Arbitrary but should be larger than
111
+ # any possible trigram file length
112
+ # in terms of total lines
113
+ dist = maxsize
114
+
115
+ return dist
116
+
117
+ def lang_dists(self, text):
118
+ """Calculate the "out-of-place" measure between
119
+ the text and all languages"""
120
+
121
+ distances = {}
122
+ profile = self.profile(text)
123
+ # For all the languages
124
+ for lang in self._corpus._all_lang_freq.keys():
125
+ # Calculate distance metric for every trigram in
126
+ # input text to be identified
127
+ lang_dist = 0
128
+ for trigram in profile:
129
+ lang_dist += self.calc_dist(lang, trigram, profile)
130
+
131
+ distances[lang] = lang_dist
132
+
133
+ return distances
134
+
135
+ def guess_language(self, text):
136
+ """Find the language with the min distance
137
+ to the text and return its ISO 639-3 code"""
138
+ self.last_distances = self.lang_dists(text)
139
+
140
+ return min(self.last_distances, key=self.last_distances.get)
141
142
+
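A sketch of the padded-trigram counting that profile() performs, applied directly to one invented token with nltk.util.trigrams.

from nltk import FreqDist
from nltk.util import trigrams

token = "<" + "cat" + ">"         # same padding as _START_CHAR / _END_CHAR
fingerprint = FreqDist("".join(tri) for tri in trigrams(token))
print(list(fingerprint.items()))  # [('<ca', 1), ('cat', 1), ('at>', 1)]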
143
+
144
+ def demo():
145
+ from nltk.corpus import udhr
146
+
147
+ langs = [
148
+ "Kurdish-UTF8",
149
+ "Abkhaz-UTF8",
150
+ "Farsi_Persian-UTF8",
151
+ "Hindi-UTF8",
152
+ "Hawaiian-UTF8",
153
+ "Russian-UTF8",
154
+ "Vietnamese-UTF8",
155
+ "Serbian_Srpski-UTF8",
156
+ "Esperanto-UTF8",
157
+ ]
158
+
159
+ friendly = {
160
+ "kmr": "Northern Kurdish",
161
+ "abk": "Abkhazian",
162
+ "pes": "Iranian Persian",
163
+ "hin": "Hindi",
164
+ "haw": "Hawaiian",
165
+ "rus": "Russian",
166
+ "vie": "Vietnamese",
167
+ "srp": "Serbian",
168
+ "epo": "Esperanto",
169
+ }
170
+
171
+ tc = TextCat()
172
+
173
+ for cur_lang in langs:
174
+ # Get raw data from UDHR corpus
175
+ raw_sentences = udhr.sents(cur_lang)
176
+ rows = len(raw_sentences) - 1
177
+ cols = list(map(len, raw_sentences))
178
+
179
+ sample = ""
180
+
181
+ # Generate a sample text of the language
182
+ for i in range(0, rows):
183
+ cur_sent = ""
184
+ for j in range(0, cols[i]):
185
+ cur_sent += " " + raw_sentences[i][j]
186
+
187
+ sample += cur_sent
188
+
189
+ # Try to detect what it is
190
+ print("Language snippet: " + sample[0:140] + "...")
191
+ guess = tc.guess_language(sample)
192
+ print(f"Language detection: {guess} ({friendly[guess]})")
193
+ print("#" * 140)
194
+
195
+
196
+ if __name__ == "__main__":
197
+ demo()
lib/python3.10/site-packages/nltk/cluster/gaac.py ADDED
@@ -0,0 +1,170 @@
1
+ # Natural Language Toolkit: Group Average Agglomerative Clusterer
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ try:
9
+ import numpy
10
+ except ImportError:
11
+ pass
12
+
13
+ from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance
14
+
15
+
16
+ class GAAClusterer(VectorSpaceClusterer):
17
+ """
18
+ The Group Average Agglomerative starts with each of the N vectors as singleton
19
+ clusters. It then iteratively merges pairs of clusters which have the
20
+ closest centroids. This continues until there is only one cluster. The
21
+ order of merges gives rise to a dendrogram: a tree with the earlier merges
22
+ lower than later merges. The membership of a given number of clusters c, 1
23
+ <= c <= N, can be found by cutting the dendrogram at depth c.
24
+
25
+ This clusterer uses the cosine similarity metric only, which allows for
26
+ efficient speed-up in the clustering process.
27
+ """
28
+
29
+ def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
30
+ VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
31
+ self._num_clusters = num_clusters
32
+ self._dendrogram = None
33
+ self._groups_values = None
34
+
35
+ def cluster(self, vectors, assign_clusters=False, trace=False):
36
+ # stores the merge order
37
+ self._dendrogram = Dendrogram(
38
+ [numpy.array(vector, numpy.float64) for vector in vectors]
39
+ )
40
+ return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)
41
+
42
+ def cluster_vectorspace(self, vectors, trace=False):
43
+ # variables describing the initial situation
44
+ N = len(vectors)
45
+ cluster_len = [1] * N
46
+ cluster_count = N
47
+ index_map = numpy.arange(N)
48
+
49
+ # construct the similarity matrix
50
+ dims = (N, N)
51
+ dist = numpy.ones(dims, dtype=float) * numpy.inf
52
+ for i in range(N):
53
+ for j in range(i + 1, N):
54
+ dist[i, j] = cosine_distance(vectors[i], vectors[j])
55
+
56
+ while cluster_count > max(self._num_clusters, 1):
57
+ i, j = numpy.unravel_index(dist.argmin(), dims)
58
+ if trace:
59
+ print("merging %d and %d" % (i, j))
60
+
61
+ # update similarities for merging i and j
62
+ self._merge_similarities(dist, cluster_len, i, j)
63
+
64
+ # remove j
65
+ dist[:, j] = numpy.inf
66
+ dist[j, :] = numpy.inf
67
+
68
+ # merge the clusters
69
+ cluster_len[i] = cluster_len[i] + cluster_len[j]
70
+ self._dendrogram.merge(index_map[i], index_map[j])
71
+ cluster_count -= 1
72
+
73
+ # update the index map to reflect the indexes if we
74
+ # had removed j
75
+ index_map[j + 1 :] -= 1
76
+ index_map[j] = N
77
+
78
+ self.update_clusters(self._num_clusters)
79
+
80
+ def _merge_similarities(self, dist, cluster_len, i, j):
81
+ # the new cluster i merged from i and j adopts the average of
82
+ # i and j's similarity to each other cluster, weighted by the
83
+ # number of points in the clusters i and j
84
+ i_weight = cluster_len[i]
85
+ j_weight = cluster_len[j]
86
+ weight_sum = i_weight + j_weight
87
+
88
+ # update for x<i
89
+ dist[:i, i] = dist[:i, i] * i_weight + dist[:i, j] * j_weight
90
+ dist[:i, i] /= weight_sum
91
+ # update for i<x<j
92
+ dist[i, i + 1 : j] = (
93
+ dist[i, i + 1 : j] * i_weight + dist[i + 1 : j, j] * j_weight
94
+ )
95
+ # update for i<j<x
96
+ dist[i, j + 1 :] = dist[i, j + 1 :] * i_weight + dist[j, j + 1 :] * j_weight
97
+ dist[i, i + 1 :] /= weight_sum
98
+
99
+ def update_clusters(self, num_clusters):
100
+ clusters = self._dendrogram.groups(num_clusters)
101
+ self._centroids = []
102
+ for cluster in clusters:
103
+ assert len(cluster) > 0
104
+ if self._should_normalise:
105
+ centroid = self._normalise(cluster[0])
106
+ else:
107
+ centroid = numpy.array(cluster[0])
108
+ for vector in cluster[1:]:
109
+ if self._should_normalise:
110
+ centroid += self._normalise(vector)
111
+ else:
112
+ centroid += vector
113
+ centroid /= len(cluster)
114
+ self._centroids.append(centroid)
115
+ self._num_clusters = len(self._centroids)
116
+
117
+ def classify_vectorspace(self, vector):
118
+ best = None
119
+ for i in range(self._num_clusters):
120
+ centroid = self._centroids[i]
121
+ dist = cosine_distance(vector, centroid)
122
+ if not best or dist < best[0]:
123
+ best = (dist, i)
124
+ return best[1]
125
+
126
+ def dendrogram(self):
127
+ """
128
+ :return: The dendrogram representing the current clustering
129
+ :rtype: Dendrogram
130
+ """
131
+ return self._dendrogram
132
+
133
+ def num_clusters(self):
134
+ return self._num_clusters
135
+
136
+ def __repr__(self):
137
+ return "<GroupAverageAgglomerative Clusterer n=%d>" % self._num_clusters
138
+
139
+
140
+ def demo():
141
+ """
142
+ Non-interactive demonstration of the clusterers with simple 2-D data.
143
+ """
144
+
145
+ from nltk.cluster import GAAClusterer
146
+
147
+ # use a set of tokens with 2D indices
148
+ vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
149
+
150
+ # test the GAAC clusterer with 4 clusters
151
+ clusterer = GAAClusterer(4)
152
+ clusters = clusterer.cluster(vectors, True)
153
+
154
+ print("Clusterer:", clusterer)
155
+ print("Clustered:", vectors)
156
+ print("As:", clusters)
157
+ print()
158
+
159
+ # show the dendrogram
160
+ clusterer.dendrogram().show()
161
+
162
+ # classify a new vector
163
+ vector = numpy.array([3, 3])
164
+ print("classify(%s):" % vector, end=" ")
165
+ print(clusterer.classify(vector))
166
+ print()
167
+
168
+
169
+ if __name__ == "__main__":
170
+ demo()
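
For orientation, a minimal usage sketch of the group-average agglomerative clusterer defined in the file above (a sketch only: it assumes numpy and nltk are installed, and the 2-D vectors are made up, mirroring the module's own demo()):

    import numpy
    from nltk.cluster import GAAClusterer

    vectors = [numpy.array(v) for v in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
    clusterer = GAAClusterer(2)                      # request two clusters
    assignments = clusterer.cluster(vectors, True)   # True -> return a cluster id per vector
    print(clusterer.num_clusters(), assignments)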
lib/python3.10/site-packages/nltk/cluster/kmeans.py ADDED
@@ -0,0 +1,231 @@
1
+ # Natural Language Toolkit: K-Means Clusterer
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import copy
9
+ import random
10
+ import sys
11
+
12
+ try:
13
+ import numpy
14
+ except ImportError:
15
+ pass
16
+
17
+
18
+ from nltk.cluster.util import VectorSpaceClusterer
19
+
20
+
21
+ class KMeansClusterer(VectorSpaceClusterer):
22
+ """
23
+ The K-means clusterer starts with k arbitrarily chosen means and then allocates
24
+ each vector to the cluster with the closest mean. It then recalculates the
25
+ means of each cluster as the centroid of the vectors in the cluster. This
26
+ process repeats until the cluster memberships stabilise. This is a
27
+ hill-climbing algorithm which may converge to a local maximum. Hence the
28
+ clustering is often repeated with random initial means and the most
29
+ commonly occurring output means are chosen.
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ num_means,
35
+ distance,
36
+ repeats=1,
37
+ conv_test=1e-6,
38
+ initial_means=None,
39
+ normalise=False,
40
+ svd_dimensions=None,
41
+ rng=None,
42
+ avoid_empty_clusters=False,
43
+ ):
44
+
45
+ """
46
+ :param num_means: the number of means to use (may use fewer)
47
+ :type num_means: int
48
+ :param distance: measure of distance between two vectors
49
+ :type distance: function taking two vectors and returning a float
50
+ :param repeats: number of randomised clustering trials to use
51
+ :type repeats: int
52
+ :param conv_test: maximum variation in mean differences before
53
+ deemed convergent
54
+ :type conv_test: number
55
+ :param initial_means: set of k initial means
56
+ :type initial_means: sequence of vectors
57
+ :param normalise: should vectors be normalised to length 1
58
+ :type normalise: boolean
59
+ :param svd_dimensions: number of dimensions to use in reducing vector
60
+ dimensionality with SVD
61
+ :type svd_dimensions: int
62
+ :param rng: random number generator (or None)
63
+ :type rng: Random
64
+ :param avoid_empty_clusters: include current centroid in computation
65
+ of next one; avoids undefined behavior
66
+ when clusters become empty
67
+ :type avoid_empty_clusters: boolean
68
+ """
69
+ VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
70
+ self._num_means = num_means
71
+ self._distance = distance
72
+ self._max_difference = conv_test
73
+ assert not initial_means or len(initial_means) == num_means
74
+ self._means = initial_means
75
+ assert repeats >= 1
76
+ assert not (initial_means and repeats > 1)
77
+ self._repeats = repeats
78
+ self._rng = rng if rng else random.Random()
79
+ self._avoid_empty_clusters = avoid_empty_clusters
80
+
81
+ def cluster_vectorspace(self, vectors, trace=False):
82
+ if self._means and self._repeats > 1:
83
+ print("Warning: means will be discarded for subsequent trials")
84
+
85
+ meanss = []
86
+ for trial in range(self._repeats):
87
+ if trace:
88
+ print("k-means trial", trial)
89
+ if not self._means or trial > 1:
90
+ self._means = self._rng.sample(list(vectors), self._num_means)
91
+ self._cluster_vectorspace(vectors, trace)
92
+ meanss.append(self._means)
93
+
94
+ if len(meanss) > 1:
95
+ # sort the means first (so that different cluster numbering won't
96
+ # affect the distance comparison)
97
+ for means in meanss:
98
+ means.sort(key=sum)
99
+
100
+ # find the set of means that's minimally different from the others
101
+ min_difference = min_means = None
102
+ for i in range(len(meanss)):
103
+ d = 0
104
+ for j in range(len(meanss)):
105
+ if i != j:
106
+ d += self._sum_distances(meanss[i], meanss[j])
107
+ if min_difference is None or d < min_difference:
108
+ min_difference, min_means = d, meanss[i]
109
+
110
+ # use the best means
111
+ self._means = min_means
112
+
113
+ def _cluster_vectorspace(self, vectors, trace=False):
114
+ if self._num_means < len(vectors):
115
+ # perform k-means clustering
116
+ converged = False
117
+ while not converged:
118
+ # assign the tokens to clusters based on minimum distance to
119
+ # the cluster means
120
+ clusters = [[] for m in range(self._num_means)]
121
+ for vector in vectors:
122
+ index = self.classify_vectorspace(vector)
123
+ clusters[index].append(vector)
124
+
125
+ if trace:
126
+ print("iteration")
127
+ # for i in range(self._num_means):
128
+ # print ' mean', i, 'allocated', len(clusters[i]), 'vectors'
129
+
130
+ # recalculate cluster means by computing the centroid of each cluster
131
+ new_means = list(map(self._centroid, clusters, self._means))
132
+
133
+ # measure the degree of change from the previous step for convergence
134
+ difference = self._sum_distances(self._means, new_means)
135
+ if difference < self._max_difference:
136
+ converged = True
137
+
138
+ # remember the new means
139
+ self._means = new_means
140
+
141
+ def classify_vectorspace(self, vector):
142
+ # finds the closest cluster centroid
143
+ # returns that cluster's index
144
+ best_distance = best_index = None
145
+ for index in range(len(self._means)):
146
+ mean = self._means[index]
147
+ dist = self._distance(vector, mean)
148
+ if best_distance is None or dist < best_distance:
149
+ best_index, best_distance = index, dist
150
+ return best_index
151
+
152
+ def num_clusters(self):
153
+ if self._means:
154
+ return len(self._means)
155
+ else:
156
+ return self._num_means
157
+
158
+ def means(self):
159
+ """
160
+ The means used for clustering.
161
+ """
162
+ return self._means
163
+
164
+ def _sum_distances(self, vectors1, vectors2):
165
+ difference = 0.0
166
+ for u, v in zip(vectors1, vectors2):
167
+ difference += self._distance(u, v)
168
+ return difference
169
+
170
+ def _centroid(self, cluster, mean):
171
+ if self._avoid_empty_clusters:
172
+ centroid = copy.copy(mean)
173
+ for vector in cluster:
174
+ centroid += vector
175
+ return centroid / (1 + len(cluster))
176
+ else:
177
+ if not len(cluster):
178
+ sys.stderr.write("Error: no centroid defined for empty cluster.\n")
179
+ sys.stderr.write(
180
+ "Try setting argument 'avoid_empty_clusters' to True\n"
181
+ )
182
+ assert False
183
+ centroid = copy.copy(cluster[0])
184
+ for vector in cluster[1:]:
185
+ centroid += vector
186
+ return centroid / len(cluster)
187
+
188
+ def __repr__(self):
189
+ return "<KMeansClusterer means=%s repeats=%d>" % (self._means, self._repeats)
190
+
191
+
192
+ #################################################################################
193
+
194
+
195
+ def demo():
196
+ # example from figure 14.9, page 517, Manning and Schutze
197
+
198
+ from nltk.cluster import KMeansClusterer, euclidean_distance
199
+
200
+ vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]]
201
+ means = [[4, 3], [5, 5]]
202
+
203
+ clusterer = KMeansClusterer(2, euclidean_distance, initial_means=means)
204
+ clusters = clusterer.cluster(vectors, True, trace=True)
205
+
206
+ print("Clustered:", vectors)
207
+ print("As:", clusters)
208
+ print("Means:", clusterer.means())
209
+ print()
210
+
211
+ vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
212
+
213
+ # test k-means using the euclidean distance metric, 2 means and repeat
214
+ # clustering 10 times with random seeds
215
+
216
+ clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
217
+ clusters = clusterer.cluster(vectors, True)
218
+ print("Clustered:", vectors)
219
+ print("As:", clusters)
220
+ print("Means:", clusterer.means())
221
+ print()
222
+
223
+ # classify a new vector
224
+ vector = numpy.array([3, 3])
225
+ print("classify(%s):" % vector, end=" ")
226
+ print(clusterer.classify(vector))
227
+ print()
228
+
229
+
230
+ if __name__ == "__main__":
231
+ demo()
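
Similarly, a minimal sketch of the k-means API added above (assumes numpy is installed; the vectors are toy data, and repeats=10 restarts from random seeds as described in the class docstring):

    import numpy
    from nltk.cluster import KMeansClusterer, euclidean_distance

    vectors = [numpy.array(v) for v in [[2, 1], [1, 3], [4, 7], [6, 7]]]
    clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
    assignments = clusterer.cluster(vectors, assign_clusters=True)
    print(clusterer.means())     # the two learned centroids
    print(assignments)           # a cluster index for each input vector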
lib/python3.10/site-packages/nltk/cluster/util.py ADDED
@@ -0,0 +1,300 @@
1
+ # Natural Language Toolkit: Clusterer Utilities
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # Contributor: J Richard Snape
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ import copy
9
+ from abc import abstractmethod
10
+ from math import sqrt
11
+ from sys import stdout
12
+
13
+ try:
14
+ import numpy
15
+ except ImportError:
16
+ pass
17
+
18
+ from nltk.cluster.api import ClusterI
19
+
20
+
21
+ class VectorSpaceClusterer(ClusterI):
22
+ """
23
+ Abstract clusterer which takes tokens and maps them into a vector space.
24
+ Optionally performs singular value decomposition to reduce the
25
+ dimensionality.
26
+ """
27
+
28
+ def __init__(self, normalise=False, svd_dimensions=None):
29
+ """
30
+ :param normalise: should vectors be normalised to length 1
31
+ :type normalise: boolean
32
+ :param svd_dimensions: number of dimensions to use in reducing vector
33
+ dimensionsionality with SVD
34
+ :type svd_dimensions: int
35
+ """
36
+ self._Tt = None
37
+ self._should_normalise = normalise
38
+ self._svd_dimensions = svd_dimensions
39
+
40
+ def cluster(self, vectors, assign_clusters=False, trace=False):
41
+ assert len(vectors) > 0
42
+
43
+ # normalise the vectors
44
+ if self._should_normalise:
45
+ vectors = list(map(self._normalise, vectors))
46
+
47
+ # use SVD to reduce the dimensionality
48
+ if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
49
+ [u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors)))
50
+ S = d[: self._svd_dimensions] * numpy.identity(
51
+ self._svd_dimensions, numpy.float64
52
+ )
53
+ T = u[:, : self._svd_dimensions]
54
+ Dt = vt[: self._svd_dimensions, :]
55
+ vectors = numpy.transpose(numpy.dot(S, Dt))
56
+ self._Tt = numpy.transpose(T)
57
+
58
+ # call abstract method to cluster the vectors
59
+ self.cluster_vectorspace(vectors, trace)
60
+
61
+ # assign the vectors to clusters
62
+ if assign_clusters:
63
+ return [self.classify(vector) for vector in vectors]
64
+
65
+ @abstractmethod
66
+ def cluster_vectorspace(self, vectors, trace):
67
+ """
68
+ Finds the clusters using the given set of vectors.
69
+ """
70
+
71
+ def classify(self, vector):
72
+ if self._should_normalise:
73
+ vector = self._normalise(vector)
74
+ if self._Tt is not None:
75
+ vector = numpy.dot(self._Tt, vector)
76
+ cluster = self.classify_vectorspace(vector)
77
+ return self.cluster_name(cluster)
78
+
79
+ @abstractmethod
80
+ def classify_vectorspace(self, vector):
81
+ """
82
+ Returns the index of the appropriate cluster for the vector.
83
+ """
84
+
85
+ def likelihood(self, vector, label):
86
+ if self._should_normalise:
87
+ vector = self._normalise(vector)
88
+ if self._Tt is not None:
89
+ vector = numpy.dot(self._Tt, vector)
90
+ return self.likelihood_vectorspace(vector, label)
91
+
92
+ def likelihood_vectorspace(self, vector, cluster):
93
+ """
94
+ Returns the likelihood of the vector belonging to the cluster.
95
+ """
96
+ predicted = self.classify_vectorspace(vector)
97
+ return 1.0 if cluster == predicted else 0.0
98
+
99
+ def vector(self, vector):
100
+ """
101
+ Returns the vector after normalisation and dimensionality reduction
102
+ """
103
+ if self._should_normalise:
104
+ vector = self._normalise(vector)
105
+ if self._Tt is not None:
106
+ vector = numpy.dot(self._Tt, vector)
107
+ return vector
108
+
109
+ def _normalise(self, vector):
110
+ """
111
+ Normalises the vector to unit length.
112
+ """
113
+ return vector / sqrt(numpy.dot(vector, vector))
114
+
115
+
116
+ def euclidean_distance(u, v):
117
+ """
118
+ Returns the euclidean distance between vectors u and v. This is equivalent
119
+ to the length of the vector (u - v).
120
+ """
121
+ diff = u - v
122
+ return sqrt(numpy.dot(diff, diff))
123
+
124
+
125
+ def cosine_distance(u, v):
126
+ """
127
+ Returns 1 minus the cosine of the angle between vectors v and u. This is
128
+ equal to ``1 - (u.v / |u||v|)``.
129
+ """
130
+ return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v))))
131
+
132
+
133
+ class _DendrogramNode:
134
+ """Tree node of a dendrogram."""
135
+
136
+ def __init__(self, value, *children):
137
+ self._value = value
138
+ self._children = children
139
+
140
+ def leaves(self, values=True):
141
+ if self._children:
142
+ leaves = []
143
+ for child in self._children:
144
+ leaves.extend(child.leaves(values))
145
+ return leaves
146
+ elif values:
147
+ return [self._value]
148
+ else:
149
+ return [self]
150
+
151
+ def groups(self, n):
152
+ queue = [(self._value, self)]
153
+
154
+ while len(queue) < n:
155
+ priority, node = queue.pop()
156
+ if not node._children:
157
+ queue.append((priority, node))  # a leaf cannot be split further; put it back
158
+ break
159
+ for child in node._children:
160
+ if child._children:
161
+ queue.append((child._value, child))
162
+ else:
163
+ queue.append((0, child))
164
+ # makes the earliest merges at the start, latest at the end
165
+ queue.sort()
166
+
167
+ groups = []
168
+ for priority, node in queue:
169
+ groups.append(node.leaves())
170
+ return groups
171
+
172
+ def __lt__(self, comparator):
173
+ return cosine_distance(self._value, comparator._value) < 0
174
+
175
+
176
+ class Dendrogram:
177
+ """
178
+ Represents a dendrogram, a tree with a specified branching order. This
179
+ must be initialised with the leaf items, then iteratively call merge for
180
+ each branch. This class constructs a tree representing the order of calls
181
+ to the merge function.
182
+ """
183
+
184
+ def __init__(self, items=[]):
185
+ """
186
+ :param items: the items at the leaves of the dendrogram
187
+ :type items: sequence of (any)
188
+ """
189
+ self._items = [_DendrogramNode(item) for item in items]
190
+ self._original_items = copy.copy(self._items)
191
+ self._merge = 1
192
+
193
+ def merge(self, *indices):
194
+ """
195
+ Merges nodes at given indices in the dendrogram. The nodes will be
196
+ combined which then replaces the first node specified. All other nodes
197
+ involved in the merge will be removed.
198
+
199
+ :param indices: indices of the items to merge (at least two)
200
+ :type indices: seq of int
201
+ """
202
+ assert len(indices) >= 2
203
+ node = _DendrogramNode(self._merge, *(self._items[i] for i in indices))
204
+ self._merge += 1
205
+ self._items[indices[0]] = node
206
+ for i in indices[1:]:
207
+ del self._items[i]
208
+
209
+ def groups(self, n):
210
+ """
211
+ Finds the n-groups of items (leaves) reachable from a cut at depth n.
212
+ :param n: number of groups
213
+ :type n: int
214
+ """
215
+ if len(self._items) > 1:
216
+ root = _DendrogramNode(self._merge, *self._items)
217
+ else:
218
+ root = self._items[0]
219
+ return root.groups(n)
220
+
221
+ def show(self, leaf_labels=[]):
222
+ """
223
+ Print the dendrogram in ASCII art to standard out.
224
+
225
+ :param leaf_labels: an optional list of strings to use for labeling the
226
+ leaves
227
+ :type leaf_labels: list
228
+ """
229
+
230
+ # ASCII rendering characters
231
+ JOIN, HLINK, VLINK = "+", "-", "|"
232
+
233
+ # find the root (or create one)
234
+ if len(self._items) > 1:
235
+ root = _DendrogramNode(self._merge, *self._items)
236
+ else:
237
+ root = self._items[0]
238
+ leaves = self._original_items
239
+
240
+ if leaf_labels:
241
+ last_row = leaf_labels
242
+ else:
243
+ last_row = ["%s" % leaf._value for leaf in leaves]
244
+
245
+ # find the bottom row and the best cell width
246
+ width = max(map(len, last_row)) + 1
247
+ lhalf = width // 2
248
+ rhalf = int(width - lhalf - 1)
249
+
250
+ # display functions
251
+ def format(centre, left=" ", right=" "):
252
+ return f"{lhalf * left}{centre}{right * rhalf}"
253
+
254
+ def display(str):
255
+ stdout.write(str)
256
+
257
+ # for each merge, top down
258
+ queue = [(root._value, root)]
259
+ verticals = [format(" ") for leaf in leaves]
260
+ while queue:
261
+ priority, node = queue.pop()
262
+ child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children))
263
+ indices = list(map(leaves.index, child_left_leaf))
264
+ if child_left_leaf:
265
+ min_idx = min(indices)
266
+ max_idx = max(indices)
267
+ for i in range(len(leaves)):
268
+ if leaves[i] in child_left_leaf:
269
+ if i == min_idx:
270
+ display(format(JOIN, " ", HLINK))
271
+ elif i == max_idx:
272
+ display(format(JOIN, HLINK, " "))
273
+ else:
274
+ display(format(JOIN, HLINK, HLINK))
275
+ verticals[i] = format(VLINK)
276
+ elif min_idx <= i <= max_idx:
277
+ display(format(HLINK, HLINK, HLINK))
278
+ else:
279
+ display(verticals[i])
280
+ display("\n")
281
+ for child in node._children:
282
+ if child._children:
283
+ queue.append((child._value, child))
284
+ queue.sort()
285
+
286
+ for vertical in verticals:
287
+ display(vertical)
288
+ display("\n")
289
+
290
+ # finally, display the last line
291
+ display("".join(item.center(width) for item in last_row))
292
+ display("\n")
293
+
294
+ def __repr__(self):
295
+ if len(self._items) > 1:
296
+ root = _DendrogramNode(self._merge, *self._items)
297
+ else:
298
+ root = self._items[0]
299
+ leaves = root.leaves(False)
300
+ return "<Dendrogram with %d leaves>" % len(leaves)
lib/python3.10/site-packages/nltk/corpus/__init__.py ADDED
@@ -0,0 +1,529 @@
1
+ # Natural Language Toolkit: Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # TODO this docstring isn't up-to-date!
9
+ """
10
+ NLTK corpus readers. The modules in this package provide functions
11
+ that can be used to read corpus files in a variety of formats. These
12
+ functions can be used to read both the corpus files that are
13
+ distributed in the NLTK corpus package, and corpus files that are part
14
+ of external corpora.
15
+
16
+ Available Corpora
17
+ =================
18
+
19
+ Please see https://www.nltk.org/nltk_data/ for a complete list.
20
+ Install corpora using nltk.download().
21
+
22
+ Corpus Reader Functions
23
+ =======================
24
+ Each corpus module defines one or more "corpus reader functions",
25
+ which can be used to read documents from that corpus. These functions
26
+ take an argument, ``item``, which is used to indicate which document
27
+ should be read from the corpus:
28
+
29
+ - If ``item`` is one of the unique identifiers listed in the corpus
30
+ module's ``items`` variable, then the corresponding document will
31
+ be loaded from the NLTK corpus package.
32
+ - If ``item`` is a filename, then that file will be read.
33
+
34
+ Additionally, corpus reader functions can be given lists of item
35
+ names; in which case, they will return a concatenation of the
36
+ corresponding documents.
37
+
38
+ Corpus reader functions are named based on the type of information
39
+ they return. Some common examples, and their return types, are:
40
+
41
+ - words(): list of str
42
+ - sents(): list of (list of str)
43
+ - paras(): list of (list of (list of str))
44
+ - tagged_words(): list of (str,str) tuple
45
+ - tagged_sents(): list of (list of (str,str))
46
+ - tagged_paras(): list of (list of (list of (str,str)))
47
+ - chunked_sents(): list of (Tree w/ (str,str) leaves)
48
+ - parsed_sents(): list of (Tree with str leaves)
49
+ - parsed_paras(): list of (list of (Tree with str leaves))
50
+ - xml(): A single xml ElementTree
51
+ - raw(): unprocessed corpus contents
52
+
53
+ For example, to read a list of the words in the Brown Corpus, use
54
+ ``nltk.corpus.brown.words()``:
55
+
56
+ >>> from nltk.corpus import brown
57
+ >>> print(", ".join(brown.words())) # doctest: +ELLIPSIS
58
+ The, Fulton, County, Grand, Jury, said, ...
59
+
60
+ """
61
+
62
+ import re
63
+
64
+ from nltk.corpus.reader import *
65
+ from nltk.corpus.util import LazyCorpusLoader
66
+ from nltk.tokenize import RegexpTokenizer
67
+
68
+ abc: PlaintextCorpusReader = LazyCorpusLoader(
69
+ "abc",
70
+ PlaintextCorpusReader,
71
+ r"(?!\.).*\.txt",
72
+ encoding=[("science", "latin_1"), ("rural", "utf8")],
73
+ )
74
+ alpino: AlpinoCorpusReader = LazyCorpusLoader(
75
+ "alpino", AlpinoCorpusReader, tagset="alpino"
76
+ )
77
+ bcp47: BCP47CorpusReader = LazyCorpusLoader(
78
+ "bcp47", BCP47CorpusReader, r"(cldr|iana)/*"
79
+ )
80
+ brown: CategorizedTaggedCorpusReader = LazyCorpusLoader(
81
+ "brown",
82
+ CategorizedTaggedCorpusReader,
83
+ r"c[a-z]\d\d",
84
+ cat_file="cats.txt",
85
+ tagset="brown",
86
+ encoding="ascii",
87
+ )
88
+ cess_cat: BracketParseCorpusReader = LazyCorpusLoader(
89
+ "cess_cat",
90
+ BracketParseCorpusReader,
91
+ r"(?!\.).*\.tbf",
92
+ tagset="unknown",
93
+ encoding="ISO-8859-15",
94
+ )
95
+ cess_esp: BracketParseCorpusReader = LazyCorpusLoader(
96
+ "cess_esp",
97
+ BracketParseCorpusReader,
98
+ r"(?!\.).*\.tbf",
99
+ tagset="unknown",
100
+ encoding="ISO-8859-15",
101
+ )
102
+ cmudict: CMUDictCorpusReader = LazyCorpusLoader(
103
+ "cmudict", CMUDictCorpusReader, ["cmudict"]
104
+ )
105
+ comtrans: AlignedCorpusReader = LazyCorpusLoader(
106
+ "comtrans", AlignedCorpusReader, r"(?!\.).*\.txt"
107
+ )
108
+ comparative_sentences: ComparativeSentencesCorpusReader = LazyCorpusLoader(
109
+ "comparative_sentences",
110
+ ComparativeSentencesCorpusReader,
111
+ r"labeledSentences\.txt",
112
+ encoding="latin-1",
113
+ )
114
+ conll2000: ConllChunkCorpusReader = LazyCorpusLoader(
115
+ "conll2000",
116
+ ConllChunkCorpusReader,
117
+ ["train.txt", "test.txt"],
118
+ ("NP", "VP", "PP"),
119
+ tagset="wsj",
120
+ encoding="ascii",
121
+ )
122
+ conll2002: ConllChunkCorpusReader = LazyCorpusLoader(
123
+ "conll2002",
124
+ ConllChunkCorpusReader,
125
+ r".*\.(test|train).*",
126
+ ("LOC", "PER", "ORG", "MISC"),
127
+ encoding="utf-8",
128
+ )
129
+ conll2007: DependencyCorpusReader = LazyCorpusLoader(
130
+ "conll2007",
131
+ DependencyCorpusReader,
132
+ r".*\.(test|train).*",
133
+ encoding=[("eus", "ISO-8859-2"), ("esp", "utf8")],
134
+ )
135
+ crubadan: CrubadanCorpusReader = LazyCorpusLoader(
136
+ "crubadan", CrubadanCorpusReader, r".*\.txt"
137
+ )
138
+ dependency_treebank: DependencyCorpusReader = LazyCorpusLoader(
139
+ "dependency_treebank", DependencyCorpusReader, r".*\.dp", encoding="ascii"
140
+ )
141
+ extended_omw: CorpusReader = LazyCorpusLoader(
142
+ "extended_omw", CorpusReader, r".*/wn-[a-z\-]*\.tab", encoding="utf8"
143
+ )
144
+ floresta: BracketParseCorpusReader = LazyCorpusLoader(
145
+ "floresta",
146
+ BracketParseCorpusReader,
147
+ r"(?!\.).*\.ptb",
148
+ "#",
149
+ tagset="unknown",
150
+ encoding="ISO-8859-15",
151
+ )
152
+ framenet15: FramenetCorpusReader = LazyCorpusLoader(
153
+ "framenet_v15",
154
+ FramenetCorpusReader,
155
+ [
156
+ "frRelation.xml",
157
+ "frameIndex.xml",
158
+ "fulltextIndex.xml",
159
+ "luIndex.xml",
160
+ "semTypes.xml",
161
+ ],
162
+ )
163
+ framenet: FramenetCorpusReader = LazyCorpusLoader(
164
+ "framenet_v17",
165
+ FramenetCorpusReader,
166
+ [
167
+ "frRelation.xml",
168
+ "frameIndex.xml",
169
+ "fulltextIndex.xml",
170
+ "luIndex.xml",
171
+ "semTypes.xml",
172
+ ],
173
+ )
174
+ gazetteers: WordListCorpusReader = LazyCorpusLoader(
175
+ "gazetteers", WordListCorpusReader, r"(?!LICENSE|\.).*\.txt", encoding="ISO-8859-2"
176
+ )
177
+ genesis: PlaintextCorpusReader = LazyCorpusLoader(
178
+ "genesis",
179
+ PlaintextCorpusReader,
180
+ r"(?!\.).*\.txt",
181
+ encoding=[
182
+ ("finnish|french|german", "latin_1"),
183
+ ("swedish", "cp865"),
184
+ (".*", "utf_8"),
185
+ ],
186
+ )
187
+ gutenberg: PlaintextCorpusReader = LazyCorpusLoader(
188
+ "gutenberg", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1"
189
+ )
190
+ ieer: IEERCorpusReader = LazyCorpusLoader("ieer", IEERCorpusReader, r"(?!README|\.).*")
191
+ inaugural: PlaintextCorpusReader = LazyCorpusLoader(
192
+ "inaugural", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1"
193
+ )
194
+ # [XX] This should probably just use TaggedCorpusReader:
195
+ indian: IndianCorpusReader = LazyCorpusLoader(
196
+ "indian", IndianCorpusReader, r"(?!\.).*\.pos", tagset="unknown", encoding="utf8"
197
+ )
198
+
199
+ jeita: ChasenCorpusReader = LazyCorpusLoader(
200
+ "jeita", ChasenCorpusReader, r".*\.chasen", encoding="utf-8"
201
+ )
202
+ knbc: KNBCorpusReader = LazyCorpusLoader(
203
+ "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp"
204
+ )
205
+ lin_thesaurus: LinThesaurusCorpusReader = LazyCorpusLoader(
206
+ "lin_thesaurus", LinThesaurusCorpusReader, r".*\.lsp"
207
+ )
208
+ mac_morpho: MacMorphoCorpusReader = LazyCorpusLoader(
209
+ "mac_morpho",
210
+ MacMorphoCorpusReader,
211
+ r"(?!\.).*\.txt",
212
+ tagset="unknown",
213
+ encoding="latin-1",
214
+ )
215
+ machado: PortugueseCategorizedPlaintextCorpusReader = LazyCorpusLoader(
216
+ "machado",
217
+ PortugueseCategorizedPlaintextCorpusReader,
218
+ r"(?!\.).*\.txt",
219
+ cat_pattern=r"([a-z]*)/.*",
220
+ encoding="latin-1",
221
+ )
222
+ masc_tagged: CategorizedTaggedCorpusReader = LazyCorpusLoader(
223
+ "masc_tagged",
224
+ CategorizedTaggedCorpusReader,
225
+ r"(spoken|written)/.*\.txt",
226
+ cat_file="categories.txt",
227
+ tagset="wsj",
228
+ encoding="utf-8",
229
+ sep="_",
230
+ )
231
+ movie_reviews: CategorizedPlaintextCorpusReader = LazyCorpusLoader(
232
+ "movie_reviews",
233
+ CategorizedPlaintextCorpusReader,
234
+ r"(?!\.).*\.txt",
235
+ cat_pattern=r"(neg|pos)/.*",
236
+ encoding="ascii",
237
+ )
238
+ multext_east: MTECorpusReader = LazyCorpusLoader(
239
+ "mte_teip5", MTECorpusReader, r"(oana).*\.xml", encoding="utf-8"
240
+ )
241
+ names: WordListCorpusReader = LazyCorpusLoader(
242
+ "names", WordListCorpusReader, r"(?!\.).*\.txt", encoding="ascii"
243
+ )
244
+ nps_chat: NPSChatCorpusReader = LazyCorpusLoader(
245
+ "nps_chat", NPSChatCorpusReader, r"(?!README|\.).*\.xml", tagset="wsj"
246
+ )
247
+ opinion_lexicon: OpinionLexiconCorpusReader = LazyCorpusLoader(
248
+ "opinion_lexicon",
249
+ OpinionLexiconCorpusReader,
250
+ r"(\w+)\-words\.txt",
251
+ encoding="ISO-8859-2",
252
+ )
253
+ ppattach: PPAttachmentCorpusReader = LazyCorpusLoader(
254
+ "ppattach", PPAttachmentCorpusReader, ["training", "test", "devset"]
255
+ )
256
+ product_reviews_1: ReviewsCorpusReader = LazyCorpusLoader(
257
+ "product_reviews_1", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8"
258
+ )
259
+ product_reviews_2: ReviewsCorpusReader = LazyCorpusLoader(
260
+ "product_reviews_2", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8"
261
+ )
262
+ pros_cons: ProsConsCorpusReader = LazyCorpusLoader(
263
+ "pros_cons",
264
+ ProsConsCorpusReader,
265
+ r"Integrated(Cons|Pros)\.txt",
266
+ cat_pattern=r"Integrated(Cons|Pros)\.txt",
267
+ encoding="ISO-8859-2",
268
+ )
269
+ ptb: CategorizedBracketParseCorpusReader = (
270
+ LazyCorpusLoader( # Penn Treebank v3: WSJ and Brown portions
271
+ "ptb",
272
+ CategorizedBracketParseCorpusReader,
273
+ r"(WSJ/\d\d/WSJ_\d\d|BROWN/C[A-Z]/C[A-Z])\d\d.MRG",
274
+ cat_file="allcats.txt",
275
+ tagset="wsj",
276
+ )
277
+ )
278
+ qc: StringCategoryCorpusReader = LazyCorpusLoader(
279
+ "qc", StringCategoryCorpusReader, ["train.txt", "test.txt"], encoding="ISO-8859-2"
280
+ )
281
+ reuters: CategorizedPlaintextCorpusReader = LazyCorpusLoader(
282
+ "reuters",
283
+ CategorizedPlaintextCorpusReader,
284
+ "(training|test).*",
285
+ cat_file="cats.txt",
286
+ encoding="ISO-8859-2",
287
+ )
288
+ rte: RTECorpusReader = LazyCorpusLoader("rte", RTECorpusReader, r"(?!\.).*\.xml")
289
+ senseval: SensevalCorpusReader = LazyCorpusLoader(
290
+ "senseval", SensevalCorpusReader, r"(?!\.).*\.pos"
291
+ )
292
+ sentence_polarity: CategorizedSentencesCorpusReader = LazyCorpusLoader(
293
+ "sentence_polarity",
294
+ CategorizedSentencesCorpusReader,
295
+ r"rt-polarity\.(neg|pos)",
296
+ cat_pattern=r"rt-polarity\.(neg|pos)",
297
+ encoding="utf-8",
298
+ )
299
+ sentiwordnet: SentiWordNetCorpusReader = LazyCorpusLoader(
300
+ "sentiwordnet", SentiWordNetCorpusReader, "SentiWordNet_3.0.0.txt", encoding="utf-8"
301
+ )
302
+ shakespeare: XMLCorpusReader = LazyCorpusLoader(
303
+ "shakespeare", XMLCorpusReader, r"(?!\.).*\.xml"
304
+ )
305
+ sinica_treebank: SinicaTreebankCorpusReader = LazyCorpusLoader(
306
+ "sinica_treebank",
307
+ SinicaTreebankCorpusReader,
308
+ ["parsed"],
309
+ tagset="unknown",
310
+ encoding="utf-8",
311
+ )
312
+ state_union: PlaintextCorpusReader = LazyCorpusLoader(
313
+ "state_union", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="ISO-8859-2"
314
+ )
315
+ stopwords: WordListCorpusReader = LazyCorpusLoader(
316
+ "stopwords", WordListCorpusReader, r"(?!README|\.).*", encoding="utf8"
317
+ )
318
+ subjectivity: CategorizedSentencesCorpusReader = LazyCorpusLoader(
319
+ "subjectivity",
320
+ CategorizedSentencesCorpusReader,
321
+ r"(quote.tok.gt9|plot.tok.gt9)\.5000",
322
+ cat_map={"quote.tok.gt9.5000": ["subj"], "plot.tok.gt9.5000": ["obj"]},
323
+ encoding="latin-1",
324
+ )
325
+ swadesh: SwadeshCorpusReader = LazyCorpusLoader(
326
+ "swadesh", SwadeshCorpusReader, r"(?!README|\.).*", encoding="utf8"
327
+ )
328
+ swadesh110: PanlexSwadeshCorpusReader = LazyCorpusLoader(
329
+ "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh110/.*\.txt", encoding="utf8"
330
+ )
331
+ swadesh207: PanlexSwadeshCorpusReader = LazyCorpusLoader(
332
+ "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh207/.*\.txt", encoding="utf8"
333
+ )
334
+ switchboard: SwitchboardCorpusReader = LazyCorpusLoader(
335
+ "switchboard", SwitchboardCorpusReader, tagset="wsj"
336
+ )
337
+ timit: TimitCorpusReader = LazyCorpusLoader("timit", TimitCorpusReader)
338
+ timit_tagged: TimitTaggedCorpusReader = LazyCorpusLoader(
339
+ "timit", TimitTaggedCorpusReader, r".+\.tags", tagset="wsj", encoding="ascii"
340
+ )
341
+ toolbox: ToolboxCorpusReader = LazyCorpusLoader(
342
+ "toolbox", ToolboxCorpusReader, r"(?!.*(README|\.)).*\.(dic|txt)"
343
+ )
344
+ treebank: BracketParseCorpusReader = LazyCorpusLoader(
345
+ "treebank/combined",
346
+ BracketParseCorpusReader,
347
+ r"wsj_.*\.mrg",
348
+ tagset="wsj",
349
+ encoding="ascii",
350
+ )
351
+ treebank_chunk: ChunkedCorpusReader = LazyCorpusLoader(
352
+ "treebank/tagged",
353
+ ChunkedCorpusReader,
354
+ r"wsj_.*\.pos",
355
+ sent_tokenizer=RegexpTokenizer(r"(?<=/\.)\s*(?![^\[]*\])", gaps=True),
356
+ para_block_reader=tagged_treebank_para_block_reader,
357
+ tagset="wsj",
358
+ encoding="ascii",
359
+ )
360
+ treebank_raw: PlaintextCorpusReader = LazyCorpusLoader(
361
+ "treebank/raw", PlaintextCorpusReader, r"wsj_.*", encoding="ISO-8859-2"
362
+ )
363
+ twitter_samples: TwitterCorpusReader = LazyCorpusLoader(
364
+ "twitter_samples", TwitterCorpusReader, r".*\.json"
365
+ )
366
+ udhr: UdhrCorpusReader = LazyCorpusLoader("udhr", UdhrCorpusReader)
367
+ udhr2: PlaintextCorpusReader = LazyCorpusLoader(
368
+ "udhr2", PlaintextCorpusReader, r".*\.txt", encoding="utf8"
369
+ )
370
+ universal_treebanks: ConllCorpusReader = LazyCorpusLoader(
371
+ "universal_treebanks_v20",
372
+ ConllCorpusReader,
373
+ r".*\.conll",
374
+ columntypes=(
375
+ "ignore",
376
+ "words",
377
+ "ignore",
378
+ "ignore",
379
+ "pos",
380
+ "ignore",
381
+ "ignore",
382
+ "ignore",
383
+ "ignore",
384
+ "ignore",
385
+ ),
386
+ )
387
+ verbnet: VerbnetCorpusReader = LazyCorpusLoader(
388
+ "verbnet", VerbnetCorpusReader, r"(?!\.).*\.xml"
389
+ )
390
+ webtext: PlaintextCorpusReader = LazyCorpusLoader(
391
+ "webtext", PlaintextCorpusReader, r"(?!README|\.).*\.txt", encoding="ISO-8859-2"
392
+ )
393
+ wordnet: WordNetCorpusReader = LazyCorpusLoader(
394
+ "wordnet",
395
+ WordNetCorpusReader,
396
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
397
+ )
398
+ wordnet31: WordNetCorpusReader = LazyCorpusLoader(
399
+ "wordnet31",
400
+ WordNetCorpusReader,
401
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
402
+ )
403
+ wordnet2021: WordNetCorpusReader = LazyCorpusLoader(
404
+ "wordnet2021",
405
+ WordNetCorpusReader,
406
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
407
+ )
408
+ wordnet_ic: WordNetICCorpusReader = LazyCorpusLoader(
409
+ "wordnet_ic", WordNetICCorpusReader, r".*\.dat"
410
+ )
411
+ words: WordListCorpusReader = LazyCorpusLoader(
412
+ "words", WordListCorpusReader, r"(?!README|\.).*", encoding="ascii"
413
+ )
414
+
415
+ # defined after treebank
416
+ propbank: PropbankCorpusReader = LazyCorpusLoader(
417
+ "propbank",
418
+ PropbankCorpusReader,
419
+ "prop.txt",
420
+ r"frames/.*\.xml",
421
+ "verbs.txt",
422
+ lambda filename: re.sub(r"^wsj/\d\d/", "", filename),
423
+ treebank,
424
+ ) # Must be defined *after* treebank corpus.
425
+ nombank: NombankCorpusReader = LazyCorpusLoader(
426
+ "nombank.1.0",
427
+ NombankCorpusReader,
428
+ "nombank.1.0",
429
+ r"frames/.*\.xml",
430
+ "nombank.1.0.words",
431
+ lambda filename: re.sub(r"^wsj/\d\d/", "", filename),
432
+ treebank,
433
+ ) # Must be defined *after* treebank corpus.
434
+ propbank_ptb: PropbankCorpusReader = LazyCorpusLoader(
435
+ "propbank",
436
+ PropbankCorpusReader,
437
+ "prop.txt",
438
+ r"frames/.*\.xml",
439
+ "verbs.txt",
440
+ lambda filename: filename.upper(),
441
+ ptb,
442
+ ) # Must be defined *after* ptb corpus.
443
+ nombank_ptb: NombankCorpusReader = LazyCorpusLoader(
444
+ "nombank.1.0",
445
+ NombankCorpusReader,
446
+ "nombank.1.0",
447
+ r"frames/.*\.xml",
448
+ "nombank.1.0.words",
449
+ lambda filename: filename.upper(),
450
+ ptb,
451
+ ) # Must be defined *after* ptb corpus.
452
+ semcor: SemcorCorpusReader = LazyCorpusLoader(
453
+ "semcor", SemcorCorpusReader, r"brown./tagfiles/br-.*\.xml", wordnet
454
+ ) # Must be defined *after* wordnet corpus.
455
+
456
+ nonbreaking_prefixes: NonbreakingPrefixesCorpusReader = LazyCorpusLoader(
457
+ "nonbreaking_prefixes",
458
+ NonbreakingPrefixesCorpusReader,
459
+ r"(?!README|\.).*",
460
+ encoding="utf8",
461
+ )
462
+ perluniprops: UnicharsCorpusReader = LazyCorpusLoader(
463
+ "perluniprops",
464
+ UnicharsCorpusReader,
465
+ r"(?!README|\.).*",
466
+ nltk_data_subdir="misc",
467
+ encoding="utf8",
468
+ )
469
+
470
+ # mwa_ppdb = LazyCorpusLoader(
471
+ # 'mwa_ppdb', MWAPPDBCorpusReader, r'(?!README|\.).*', nltk_data_subdir='misc', encoding='utf8')
472
+
473
+ # See https://github.com/nltk/nltk/issues/1579
474
+ # and https://github.com/nltk/nltk/issues/1716
475
+ #
476
+ # pl196x = LazyCorpusLoader(
477
+ # 'pl196x', Pl196xCorpusReader, r'[a-z]-.*\.xml',
478
+ # cat_file='cats.txt', textid_file='textids.txt', encoding='utf8')
479
+ #
480
+ # ipipan = LazyCorpusLoader(
481
+ # 'ipipan', IPIPANCorpusReader, r'(?!\.).*morph\.xml')
482
+ #
483
+ # nkjp = LazyCorpusLoader(
484
+ # 'nkjp', NKJPCorpusReader, r'', encoding='utf8')
485
+ #
486
+ # panlex_lite = LazyCorpusLoader(
487
+ # 'panlex_lite', PanLexLiteCorpusReader)
488
+ #
489
+ # ycoe = LazyCorpusLoader(
490
+ # 'ycoe', YCOECorpusReader)
491
+ #
492
+ # corpus not available with NLTK; these lines caused help(nltk.corpus) to break
493
+ # hebrew_treebank = LazyCorpusLoader(
494
+ # 'hebrew_treebank', BracketParseCorpusReader, r'.*\.txt')
495
+
496
+ # FIXME: override any imported demo from various corpora, see https://github.com/nltk/nltk/issues/2116
497
+ def demo():
498
+ # This is out-of-date:
499
+ abc.demo()
500
+ brown.demo()
501
+ # chat80.demo()
502
+ cmudict.demo()
503
+ conll2000.demo()
504
+ conll2002.demo()
505
+ genesis.demo()
506
+ gutenberg.demo()
507
+ ieer.demo()
508
+ inaugural.demo()
509
+ indian.demo()
510
+ names.demo()
511
+ ppattach.demo()
512
+ senseval.demo()
513
+ shakespeare.demo()
514
+ sinica_treebank.demo()
515
+ state_union.demo()
516
+ stopwords.demo()
517
+ timit.demo()
518
+ toolbox.demo()
519
+ treebank.demo()
520
+ udhr.demo()
521
+ webtext.demo()
522
+ words.demo()
523
+
524
+
525
+ # ycoe.demo()
526
+
527
+ if __name__ == "__main__":
528
+ # demo()
529
+ pass
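
Every reader declared above is wrapped in LazyCorpusLoader, so no data is read until an attribute is first accessed; a hedged sketch of typical use (the corpora must first be fetched with nltk.download()):

    import nltk
    from nltk.corpus import brown, stopwords

    nltk.download("brown")                     # one-off data fetch
    nltk.download("stopwords")
    print(brown.words()[:5])                   # ['The', 'Fulton', 'County', 'Grand', 'Jury']
    print(stopwords.words("english")[:3])      # a few English stopwords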
lib/python3.10/site-packages/nltk/corpus/europarl_raw.py ADDED
@@ -0,0 +1,56 @@
1
+ # Natural Language Toolkit: Europarl Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Nitin Madnani <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import re
9
+
10
+ from nltk.corpus.reader import *
11
+ from nltk.corpus.util import LazyCorpusLoader
12
+
13
+ # Create a new corpus reader instance for each European language
14
+ danish: EuroparlCorpusReader = LazyCorpusLoader(
15
+ "europarl_raw/danish", EuroparlCorpusReader, r"ep-.*\.da", encoding="utf-8"
16
+ )
17
+
18
+ dutch: EuroparlCorpusReader = LazyCorpusLoader(
19
+ "europarl_raw/dutch", EuroparlCorpusReader, r"ep-.*\.nl", encoding="utf-8"
20
+ )
21
+
22
+ english: EuroparlCorpusReader = LazyCorpusLoader(
23
+ "europarl_raw/english", EuroparlCorpusReader, r"ep-.*\.en", encoding="utf-8"
24
+ )
25
+
26
+ finnish: EuroparlCorpusReader = LazyCorpusLoader(
27
+ "europarl_raw/finnish", EuroparlCorpusReader, r"ep-.*\.fi", encoding="utf-8"
28
+ )
29
+
30
+ french: EuroparlCorpusReader = LazyCorpusLoader(
31
+ "europarl_raw/french", EuroparlCorpusReader, r"ep-.*\.fr", encoding="utf-8"
32
+ )
33
+
34
+ german: EuroparlCorpusReader = LazyCorpusLoader(
35
+ "europarl_raw/german", EuroparlCorpusReader, r"ep-.*\.de", encoding="utf-8"
36
+ )
37
+
38
+ greek: EuroparlCorpusReader = LazyCorpusLoader(
39
+ "europarl_raw/greek", EuroparlCorpusReader, r"ep-.*\.el", encoding="utf-8"
40
+ )
41
+
42
+ italian: EuroparlCorpusReader = LazyCorpusLoader(
43
+ "europarl_raw/italian", EuroparlCorpusReader, r"ep-.*\.it", encoding="utf-8"
44
+ )
45
+
46
+ portuguese: EuroparlCorpusReader = LazyCorpusLoader(
47
+ "europarl_raw/portuguese", EuroparlCorpusReader, r"ep-.*\.pt", encoding="utf-8"
48
+ )
49
+
50
+ spanish: EuroparlCorpusReader = LazyCorpusLoader(
51
+ "europarl_raw/spanish", EuroparlCorpusReader, r"ep-.*\.es", encoding="utf-8"
52
+ )
53
+
54
+ swedish: EuroparlCorpusReader = LazyCorpusLoader(
55
+ "europarl_raw/swedish", EuroparlCorpusReader, r"ep-.*\.sv", encoding="utf-8"
56
+ )
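
Each loader above exposes the standard plaintext reader API once the europarl_raw data has been downloaded; for example:

    import nltk
    from nltk.corpus import europarl_raw

    nltk.download("europarl_raw")
    print(europarl_raw.english.fileids()[:2])
    print(europarl_raw.english.words()[:10])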
lib/python3.10/site-packages/nltk/corpus/reader/bnc.py ADDED
@@ -0,0 +1,265 @@
1
+ # Natural Language Toolkit: Plaintext Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """Corpus reader for the XML version of the British National Corpus."""
9
+
10
+ from nltk.corpus.reader.util import concat
11
+ from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader, XMLCorpusView
12
+
13
+
14
+ class BNCCorpusReader(XMLCorpusReader):
15
+ r"""Corpus reader for the XML version of the British National Corpus.
16
+
17
+ For access to the complete XML data structure, use the ``xml()``
18
+ method. For access to simple word lists and tagged word lists, use
19
+ ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``.
20
+
21
+ You can obtain the full version of the BNC corpus at
22
+ https://www.ota.ox.ac.uk/desc/2554
23
+
24
+ If you extracted the archive to a directory called `BNC`, then you can
25
+ instantiate the reader as::
26
+
27
+ BNCCorpusReader(root='BNC/Texts/', fileids=r'[A-K]/\w*/\w*\.xml')
28
+
29
+ """
30
+
31
+ def __init__(self, root, fileids, lazy=True):
32
+ XMLCorpusReader.__init__(self, root, fileids)
33
+ self._lazy = lazy
34
+
35
+ def words(self, fileids=None, strip_space=True, stem=False):
36
+ """
37
+ :return: the given file(s) as a list of words
38
+ and punctuation symbols.
39
+ :rtype: list(str)
40
+
41
+ :param strip_space: If true, then strip trailing spaces from
42
+ word tokens. Otherwise, leave the spaces on the tokens.
43
+ :param stem: If true, then use word stems instead of word strings.
44
+ """
45
+ return self._views(fileids, False, None, strip_space, stem)
46
+
47
+ def tagged_words(self, fileids=None, c5=False, strip_space=True, stem=False):
48
+ """
49
+ :return: the given file(s) as a list of tagged
50
+ words and punctuation symbols, encoded as tuples
51
+ ``(word,tag)``.
52
+ :rtype: list(tuple(str,str))
53
+
54
+ :param c5: If true, then the tags used will be the more detailed
55
+ c5 tags. Otherwise, the simplified tags will be used.
56
+ :param strip_space: If true, then strip trailing spaces from
57
+ word tokens. Otherwise, leave the spaces on the tokens.
58
+ :param stem: If true, then use word stems instead of word strings.
59
+ """
60
+ tag = "c5" if c5 else "pos"
61
+ return self._views(fileids, False, tag, strip_space, stem)
62
+
63
+ def sents(self, fileids=None, strip_space=True, stem=False):
64
+ """
65
+ :return: the given file(s) as a list of
66
+ sentences or utterances, each encoded as a list of word
67
+ strings.
68
+ :rtype: list(list(str))
69
+
70
+ :param strip_space: If true, then strip trailing spaces from
71
+ word tokens. Otherwise, leave the spaces on the tokens.
72
+ :param stem: If true, then use word stems instead of word strings.
73
+ """
74
+ return self._views(fileids, True, None, strip_space, stem)
75
+
76
+ def tagged_sents(self, fileids=None, c5=False, strip_space=True, stem=False):
77
+ """
78
+ :return: the given file(s) as a list of
79
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
80
+ :rtype: list(list(tuple(str,str)))
81
+
82
+ :param c5: If true, then the tags used will be the more detailed
83
+ c5 tags. Otherwise, the simplified tags will be used.
84
+ :param strip_space: If true, then strip trailing spaces from
85
+ word tokens. Otherwise, leave the spaces on the tokens.
86
+ :param stem: If true, then use word stems instead of word strings.
87
+ """
88
+ tag = "c5" if c5 else "pos"
89
+ return self._views(
90
+ fileids, sent=True, tag=tag, strip_space=strip_space, stem=stem
91
+ )
92
+
93
+ def _views(self, fileids=None, sent=False, tag=False, strip_space=True, stem=False):
94
+ """A helper function that instantiates BNCWordViews or the list of words/sentences."""
95
+ f = BNCWordView if self._lazy else self._words
96
+ return concat(
97
+ [
98
+ f(fileid, sent, tag, strip_space, stem)
99
+ for fileid in self.abspaths(fileids)
100
+ ]
101
+ )
102
+
103
+ def _words(self, fileid, bracket_sent, tag, strip_space, stem):
104
+ """
105
+ Helper used to implement the view methods -- returns a list of
106
+ words or a list of sentences, optionally tagged.
107
+
108
+ :param fileid: The name of the underlying file.
109
+ :param bracket_sent: If true, include sentence bracketing.
110
+ :param tag: The name of the tagset to use, or None for no tags.
111
+ :param strip_space: If true, strip spaces from word tokens.
112
+ :param stem: If true, then substitute stems for words.
113
+ """
114
+ result = []
115
+
116
+ xmldoc = ElementTree.parse(fileid).getroot()
117
+ for xmlsent in xmldoc.findall(".//s"):
118
+ sent = []
119
+ for xmlword in _all_xmlwords_in(xmlsent):
120
+ word = xmlword.text
121
+ if not word:
122
+ word = "" # fixes issue 337?
123
+ if strip_space or stem:
124
+ word = word.strip()
125
+ if stem:
126
+ word = xmlword.get("hw", word)
127
+ if tag == "c5":
128
+ word = (word, xmlword.get("c5"))
129
+ elif tag == "pos":
130
+ word = (word, xmlword.get("pos", xmlword.get("c5")))
131
+ sent.append(word)
132
+ if bracket_sent:
133
+ result.append(BNCSentence(xmlsent.attrib["n"], sent))
134
+ else:
135
+ result.extend(sent)
136
+
137
+ assert None not in result
138
+ return result
139
+
140
+
141
+ def _all_xmlwords_in(elt, result=None):
142
+ if result is None:
143
+ result = []
144
+ for child in elt:
145
+ if child.tag in ("c", "w"):
146
+ result.append(child)
147
+ else:
148
+ _all_xmlwords_in(child, result)
149
+ return result
150
+
151
+
152
+ class BNCSentence(list):
153
+ """
154
+ A list of words, augmented by an attribute ``num`` used to record
155
+ the sentence identifier (the ``n`` attribute from the XML).
156
+ """
157
+
158
+ def __init__(self, num, items):
159
+ self.num = num
160
+ list.__init__(self, items)
161
+
162
+
163
+ class BNCWordView(XMLCorpusView):
164
+ """
165
+ A stream backed corpus view specialized for use with the BNC corpus.
166
+ """
167
+
168
+ tags_to_ignore = {
169
+ "pb",
170
+ "gap",
171
+ "vocal",
172
+ "event",
173
+ "unclear",
174
+ "shift",
175
+ "pause",
176
+ "align",
177
+ }
178
+ """These tags are ignored. For their description refer to the
179
+ technical documentation, for example,
180
+ http://www.natcorp.ox.ac.uk/docs/URG/ref-vocal.html
181
+
182
+ """
183
+
184
+ def __init__(self, fileid, sent, tag, strip_space, stem):
185
+ """
186
+ :param fileid: The name of the underlying file.
187
+ :param sent: If true, include sentence bracketing.
188
+ :param tag: The name of the tagset to use, or None for no tags.
189
+ :param strip_space: If true, strip spaces from word tokens.
190
+ :param stem: If true, then substitute stems for words.
191
+ """
192
+ if sent:
193
+ tagspec = ".*/s"
194
+ else:
195
+ tagspec = ".*/s/(.*/)?(c|w)"
196
+ self._sent = sent
197
+ self._tag = tag
198
+ self._strip_space = strip_space
199
+ self._stem = stem
200
+
201
+ self.title = None #: Title of the document.
202
+ self.author = None #: Author of the document.
203
+ self.editor = None #: Editor
204
+ self.resps = None #: Statement of responsibility
205
+
206
+ XMLCorpusView.__init__(self, fileid, tagspec)
207
+
208
+ # Read in a tasty header.
209
+ self._open()
210
+ self.read_block(self._stream, ".*/teiHeader$", self.handle_header)
211
+ self.close()
212
+
213
+ # Reset tag context.
214
+ self._tag_context = {0: ()}
215
+
216
+ def handle_header(self, elt, context):
217
+ # Set up some metadata!
218
+ titles = elt.findall("titleStmt/title")
219
+ if titles:
220
+ self.title = "\n".join(title.text.strip() for title in titles)
221
+
222
+ authors = elt.findall("titleStmt/author")
223
+ if authors:
224
+ self.author = "\n".join(author.text.strip() for author in authors)
225
+
226
+ editors = elt.findall("titleStmt/editor")
227
+ if editors:
228
+ self.editor = "\n".join(editor.text.strip() for editor in editors)
229
+
230
+ resps = elt.findall("titleStmt/respStmt")
231
+ if resps:
232
+ self.resps = "\n\n".join(
233
+ "\n".join(resp_elt.text.strip() for resp_elt in resp) for resp in resps
234
+ )
235
+
236
+ def handle_elt(self, elt, context):
237
+ if self._sent:
238
+ return self.handle_sent(elt)
239
+ else:
240
+ return self.handle_word(elt)
241
+
242
+ def handle_word(self, elt):
243
+ word = elt.text
244
+ if not word:
245
+ word = "" # fixes issue 337?
246
+ if self._strip_space or self._stem:
247
+ word = word.strip()
248
+ if self._stem:
249
+ word = elt.get("hw", word)
250
+ if self._tag == "c5":
251
+ word = (word, elt.get("c5"))
252
+ elif self._tag == "pos":
253
+ word = (word, elt.get("pos", elt.get("c5")))
254
+ return word
255
+
256
+ def handle_sent(self, elt):
257
+ sent = []
258
+ for child in elt:
259
+ if child.tag in ("mw", "hi", "corr", "trunc"):
260
+ sent += [self.handle_word(w) for w in child]
261
+ elif child.tag in ("w", "c"):
262
+ sent.append(self.handle_word(child))
263
+ elif child.tag not in self.tags_to_ignore:
264
+ raise ValueError("Unexpected element %s" % child.tag)
265
+ return BNCSentence(elt.attrib["n"], sent)
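
A usage sketch for the reader above; the root path and fileid pattern follow the class docstring's example and assume a locally extracted copy of the BNC (the corpus itself is not distributed with NLTK):

    from nltk.corpus.reader.bnc import BNCCorpusReader

    bnc = BNCCorpusReader(root="BNC/Texts/", fileids=r"[A-K]/\w*/\w*\.xml")
    print(bnc.words()[:10])                 # plain word tokens
    print(bnc.tagged_words(c5=True)[:5])    # (word, C5 tag) pairs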
lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py ADDED
@@ -0,0 +1,237 @@
1
+ # Natural Language Toolkit: Penn Treebank Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ """
9
+ Corpus reader for corpora that consist of parenthesis-delineated parse trees.
10
+ """
11
+
12
+ import sys
13
+
14
+ from nltk.corpus.reader.api import *
15
+ from nltk.corpus.reader.util import *
16
+ from nltk.tag import map_tag
17
+ from nltk.tree import Tree
18
+
19
+ # we use [^\s()]+ instead of \S+? to avoid matching ()
20
+ SORTTAGWRD = re.compile(r"\((\d+) ([^\s()]+) ([^\s()]+)\)")
21
+ TAGWORD = re.compile(r"\(([^\s()]+) ([^\s()]+)\)")
22
+ WORD = re.compile(r"\([^\s()]+ ([^\s()]+)\)")
23
+ EMPTY_BRACKETS = re.compile(r"\s*\(\s*\(")
24
+
25
+
26
+ class BracketParseCorpusReader(SyntaxCorpusReader):
27
+ """
28
+ Reader for corpora that consist of parenthesis-delineated parse trees,
29
+ like those found in the "combined" section of the Penn Treebank,
30
+ e.g. "(S (NP (DT the) (JJ little) (NN dog)) (VP (VBD barked)))".
31
+
32
+ """
33
+
34
+ def __init__(
35
+ self,
36
+ root,
37
+ fileids,
38
+ comment_char=None,
39
+ detect_blocks="unindented_paren",
40
+ encoding="utf8",
41
+ tagset=None,
42
+ ):
43
+ """
44
+ :param root: The root directory for this corpus.
45
+ :param fileids: A list or regexp specifying the fileids in this corpus.
46
+ :param comment_char: The character which can appear at the start of
47
+ a line to indicate that the rest of the line is a comment.
48
+ :param detect_blocks: The method that is used to find blocks
49
+ in the corpus; can be 'unindented_paren' (every unindented
50
+ parenthesis starts a new parse) or 'sexpr' (brackets are
51
+ matched).
52
+ :param tagset: The name of the tagset used by this corpus, to be used
53
+ for normalizing or converting the POS tags returned by the
54
+ ``tagged_...()`` methods.
55
+ """
56
+ SyntaxCorpusReader.__init__(self, root, fileids, encoding)
57
+ self._comment_char = comment_char
58
+ self._detect_blocks = detect_blocks
59
+ self._tagset = tagset
60
+
61
+ def _read_block(self, stream):
62
+ if self._detect_blocks == "sexpr":
63
+ return read_sexpr_block(stream, comment_char=self._comment_char)
64
+ elif self._detect_blocks == "blankline":
65
+ return read_blankline_block(stream)
66
+ elif self._detect_blocks == "unindented_paren":
67
+ # Tokens start with unindented left parens.
68
+ toks = read_regexp_block(stream, start_re=r"^\(")
69
+ # Strip any comments out of the tokens.
70
+ if self._comment_char:
71
+ toks = [
72
+ re.sub("(?m)^%s.*" % re.escape(self._comment_char), "", tok)
73
+ for tok in toks
74
+ ]
75
+ return toks
76
+ else:
77
+ assert 0, "bad block type"
78
+
79
+ def _normalize(self, t):
80
+ # Replace leaves of the form (!), (,), with (! !), (, ,)
81
+ t = re.sub(r"\((.)\)", r"(\1 \1)", t)
82
+ # Replace leaves of the form (tag word root) with (tag word)
83
+ t = re.sub(r"\(([^\s()]+) ([^\s()]+) [^\s()]+\)", r"(\1 \2)", t)
84
+ return t
85
+
86
+ def _parse(self, t):
87
+ try:
88
+ tree = Tree.fromstring(self._normalize(t))
89
+ # If there's an empty node at the top, strip it off
90
+ if tree.label() == "" and len(tree) == 1:
91
+ return tree[0]
92
+ else:
93
+ return tree
94
+
95
+ except ValueError as e:
96
+ sys.stderr.write("Bad tree detected; trying to recover...\n")
97
+ # Try to recover, if we can:
98
+ if e.args == ("mismatched parens",):
99
+ for n in range(1, 5):
100
+ try:
101
+ v = Tree(self._normalize(t + ")" * n))
102
+ sys.stderr.write(
103
+ " Recovered by adding %d close " "paren(s)\n" % n
104
+ )
105
+ return v
106
+ except ValueError:
107
+ pass
108
+ # Try something else:
109
+ sys.stderr.write(" Recovered by returning a flat parse.\n")
110
+ # sys.stderr.write(' '.join(t.split())+'\n')
111
+ return Tree("S", self._tag(t))
112
+
113
+ def _tag(self, t, tagset=None):
114
+ tagged_sent = [(w, p) for (p, w) in TAGWORD.findall(self._normalize(t))]
115
+ if tagset and tagset != self._tagset:
116
+ tagged_sent = [
117
+ (w, map_tag(self._tagset, tagset, p)) for (w, p) in tagged_sent
118
+ ]
119
+ return tagged_sent
120
+
121
+ def _word(self, t):
122
+ return WORD.findall(self._normalize(t))
123
+
124
+
125
+ class CategorizedBracketParseCorpusReader(
126
+ CategorizedCorpusReader, BracketParseCorpusReader
127
+ ):
128
+ """
129
+ A reader for parsed corpora whose documents are
130
+ divided into categories based on their file identifiers.
131
+ @author: Nathan Schneider <[email protected]>
132
+ """
133
+
134
+ def __init__(self, *args, **kwargs):
135
+ """
136
+ Initialize the corpus reader. Categorization arguments
137
+ (C{cat_pattern}, C{cat_map}, and C{cat_file}) are passed to
138
+ the L{CategorizedCorpusReader constructor
139
+ <CategorizedCorpusReader.__init__>}. The remaining arguments
140
+ are passed to the L{BracketParseCorpusReader constructor
141
+ <BracketParseCorpusReader.__init__>}.
142
+ """
143
+ CategorizedCorpusReader.__init__(self, kwargs)
144
+ BracketParseCorpusReader.__init__(self, *args, **kwargs)
145
+
146
+ def tagged_words(self, fileids=None, categories=None, tagset=None):
147
+ return super().tagged_words(self._resolve(fileids, categories), tagset)
148
+
149
+ def tagged_sents(self, fileids=None, categories=None, tagset=None):
150
+ return super().tagged_sents(self._resolve(fileids, categories), tagset)
151
+
152
+ def tagged_paras(self, fileids=None, categories=None, tagset=None):
153
+ return super().tagged_paras(self._resolve(fileids, categories), tagset)
154
+
155
+ def parsed_words(self, fileids=None, categories=None):
156
+ return super().parsed_words(self._resolve(fileids, categories))
157
+
158
+ def parsed_sents(self, fileids=None, categories=None):
159
+ return super().parsed_sents(self._resolve(fileids, categories))
160
+
161
+ def parsed_paras(self, fileids=None, categories=None):
162
+ return super().parsed_paras(self._resolve(fileids, categories))
163
+
164
+
165
+ class AlpinoCorpusReader(BracketParseCorpusReader):
166
+ """
167
+ Reader for the Alpino Dutch Treebank.
168
+ This corpus has a lexical breakdown structure embedded, as read by `_parse`
169
+ Unfortunately this puts punctuation and some other words out of the sentence
170
+ order in the xml element tree. This is no good for `tag_` and `word_`;
171
+ `_tag` and `_word` will be overridden to use a non-default new parameter 'ordered'
172
+ to the overridden _normalize function. The _parse function can then remain
173
+ untouched.
174
+ """
175
+
176
+ def __init__(self, root, encoding="ISO-8859-1", tagset=None):
177
+ BracketParseCorpusReader.__init__(
178
+ self,
179
+ root,
180
+ r"alpino\.xml",
181
+ detect_blocks="blankline",
182
+ encoding=encoding,
183
+ tagset=tagset,
184
+ )
185
+
186
+ def _normalize(self, t, ordered=False):
187
+ """Normalize the xml sentence element in t.
188
+ The sentence elements <alpino_ds>, although embedded in a few overall
189
+ xml elements, are separated by blank lines. That's how the reader can
190
+ deliver them one at a time.
191
+ Each sentence has a few category subnodes that are of no use to us.
192
+ The remaining word nodes may or may not appear in the proper order.
193
+ Each word node has attributes, among which:
194
+ - begin : the position of the word in the sentence
195
+ - pos : Part of Speech: the Tag
196
+ - word : the actual word
197
+ The return value is a string with all xml elements replaced by
198
+ clauses: either a cat clause with nested clauses, or a word clause.
199
+ The order of the bracket clauses closely follows the xml.
200
+ If ordered == True, the word clauses include an order sequence number.
201
+ If ordered == False, the word clauses only have pos and word parts.
202
+ """
203
+ if t[:10] != "<alpino_ds":
204
+ return ""
205
+ # convert XML to sexpr notation
206
+ t = re.sub(r' <node .*? cat="(\w+)".*>', r"(\1", t)
207
+ if ordered:
208
+ t = re.sub(
209
+ r' <node. *?begin="(\d+)".*? pos="(\w+)".*? word="([^"]+)".*?/>',
210
+ r"(\1 \2 \3)",
211
+ t,
212
+ )
213
+ else:
214
+ t = re.sub(r' <node .*?pos="(\w+)".*? word="([^"]+)".*?/>', r"(\1 \2)", t)
215
+ t = re.sub(r" </node>", r")", t)
216
+ t = re.sub(r"<sentence>.*</sentence>", r"", t)
217
+ t = re.sub(r"</?alpino_ds.*>", r"", t)
218
+ return t
219
+
220
+ def _tag(self, t, tagset=None):
221
+ tagged_sent = [
222
+ (int(o), w, p)
223
+ for (o, p, w) in SORTTAGWRD.findall(self._normalize(t, ordered=True))
224
+ ]
225
+ tagged_sent.sort()
226
+ if tagset and tagset != self._tagset:
227
+ tagged_sent = [
228
+ (w, map_tag(self._tagset, tagset, p)) for (o, w, p) in tagged_sent
229
+ ]
230
+ else:
231
+ tagged_sent = [(w, p) for (o, w, p) in tagged_sent]
232
+ return tagged_sent
233
+
234
+ def _word(self, t):
235
+ """Return a correctly ordered list if words"""
236
+ tagged_sent = self._tag(t)
237
+ return [w for (w, p) in tagged_sent]
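A brief sketch of how the ordered normalization above plays out in practice, assuming the Alpino sample corpus has been installed (e.g. via nltk.download("alpino")); the exact output depends on the installed data.

from nltk.corpus import alpino

# words() and tagged_words() go through _tag(), which restores sentence order
# by sorting on the "begin" attribute extracted by _normalize(ordered=True).
print(alpino.words()[:10])
print(alpino.tagged_words()[:5])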
lib/python3.10/site-packages/nltk/corpus/reader/categorized_sents.py ADDED
@@ -0,0 +1,168 @@
1
+ # Natural Language Toolkit: Categorized Sentences Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Pierpaolo Pantone <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader structured for corpora that contain one instance on each row.
10
+ This CorpusReader is specifically used for the Subjectivity Dataset and the
11
+ Sentence Polarity Dataset.
12
+
13
+ - Subjectivity Dataset information -
14
+
15
+ Authors: Bo Pang and Lillian Lee.
16
+ Url: https://www.cs.cornell.edu/people/pabo/movie-review-data
17
+
18
+ Distributed with permission.
19
+
20
+ Related papers:
21
+
22
+ - Bo Pang and Lillian Lee. "A Sentimental Education: Sentiment Analysis Using
23
+ Subjectivity Summarization Based on Minimum Cuts". Proceedings of the ACL,
24
+ 2004.
25
+
26
+ - Sentence Polarity Dataset information -
27
+
28
+ Authors: Bo Pang and Lillian Lee.
29
+ Url: https://www.cs.cornell.edu/people/pabo/movie-review-data
30
+
31
+ Related papers:
32
+
33
+ - Bo Pang and Lillian Lee. "Seeing stars: Exploiting class relationships for
34
+ sentiment categorization with respect to rating scales". Proceedings of the
35
+ ACL, 2005.
36
+ """
37
+
38
+ from nltk.corpus.reader.api import *
39
+ from nltk.tokenize import *
40
+
41
+
42
+ class CategorizedSentencesCorpusReader(CategorizedCorpusReader, CorpusReader):
43
+ """
44
+ A reader for corpora in which each row represents a single instance, mainly
45
+ a sentence. Istances are divided into categories based on their file identifiers
46
+ (see CategorizedCorpusReader).
47
+ Since many corpora allow rows that contain more than one sentence, it is
48
+ possible to specify a sentence tokenizer to retrieve all sentences instead
49
+ than all rows.
50
+
51
+ Examples using the Subjectivity Dataset:
52
+
53
+ >>> from nltk.corpus import subjectivity
54
+ >>> subjectivity.sents()[23] # doctest: +NORMALIZE_WHITESPACE
55
+ ['television', 'made', 'him', 'famous', ',', 'but', 'his', 'biggest', 'hits',
56
+ 'happened', 'off', 'screen', '.']
57
+ >>> subjectivity.categories()
58
+ ['obj', 'subj']
59
+ >>> subjectivity.words(categories='subj')
60
+ ['smart', 'and', 'alert', ',', 'thirteen', ...]
61
+
62
+ Examples using the Sentence Polarity Dataset:
63
+
64
+ >>> from nltk.corpus import sentence_polarity
65
+ >>> sentence_polarity.sents() # doctest: +NORMALIZE_WHITESPACE
66
+ [['simplistic', ',', 'silly', 'and', 'tedious', '.'], ["it's", 'so', 'laddish',
67
+ 'and', 'juvenile', ',', 'only', 'teenage', 'boys', 'could', 'possibly', 'find',
68
+ 'it', 'funny', '.'], ...]
69
+ >>> sentence_polarity.categories()
70
+ ['neg', 'pos']
71
+ """
72
+
73
+ CorpusView = StreamBackedCorpusView
74
+
75
+ def __init__(
76
+ self,
77
+ root,
78
+ fileids,
79
+ word_tokenizer=WhitespaceTokenizer(),
80
+ sent_tokenizer=None,
81
+ encoding="utf8",
82
+ **kwargs
83
+ ):
84
+ """
85
+ :param root: The root directory for the corpus.
86
+ :param fileids: a list or regexp specifying the fileids in the corpus.
87
+ :param word_tokenizer: a tokenizer for breaking sentences or paragraphs
88
+ into words. Default: `WhitespaceTokenizer`
89
+ :param sent_tokenizer: a tokenizer for breaking paragraphs into sentences.
90
+ :param encoding: the encoding that should be used to read the corpus.
91
+ :param kwargs: additional parameters passed to CategorizedCorpusReader.
92
+ """
93
+
94
+ CorpusReader.__init__(self, root, fileids, encoding)
95
+ CategorizedCorpusReader.__init__(self, kwargs)
96
+ self._word_tokenizer = word_tokenizer
97
+ self._sent_tokenizer = sent_tokenizer
98
+
99
+ def sents(self, fileids=None, categories=None):
100
+ """
101
+ Return all sentences in the corpus or in the specified file(s).
102
+
103
+ :param fileids: a list or regexp specifying the ids of the files whose
104
+ sentences have to be returned.
105
+ :param categories: a list specifying the categories whose sentences have
106
+ to be returned.
107
+ :return: the given file(s) as a list of sentences.
108
+ Each sentence is tokenized using the specified word_tokenizer.
109
+ :rtype: list(list(str))
110
+ """
111
+ fileids = self._resolve(fileids, categories)
112
+ if fileids is None:
113
+ fileids = self._fileids
114
+ elif isinstance(fileids, str):
115
+ fileids = [fileids]
116
+ return concat(
117
+ [
118
+ self.CorpusView(path, self._read_sent_block, encoding=enc)
119
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
120
+ ]
121
+ )
122
+
123
+ def words(self, fileids=None, categories=None):
124
+ """
125
+ Return all words and punctuation symbols in the corpus or in the specified
126
+ file(s).
127
+
128
+ :param fileids: a list or regexp specifying the ids of the files whose
129
+ words have to be returned.
130
+ :param categories: a list specifying the categories whose words have to
131
+ be returned.
132
+ :return: the given file(s) as a list of words and punctuation symbols.
133
+ :rtype: list(str)
134
+ """
135
+ fileids = self._resolve(fileids, categories)
136
+ if fileids is None:
137
+ fileids = self._fileids
138
+ elif isinstance(fileids, str):
139
+ fileids = [fileids]
140
+ return concat(
141
+ [
142
+ self.CorpusView(path, self._read_word_block, encoding=enc)
143
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
144
+ ]
145
+ )
146
+
147
+ def _read_sent_block(self, stream):
148
+ sents = []
149
+ for i in range(20): # Read 20 lines at a time.
150
+ line = stream.readline()
151
+ if not line:
152
+ continue
153
+ if self._sent_tokenizer:
154
+ sents.extend(
155
+ [
156
+ self._word_tokenizer.tokenize(sent)
157
+ for sent in self._sent_tokenizer.tokenize(line)
158
+ ]
159
+ )
160
+ else:
161
+ sents.append(self._word_tokenizer.tokenize(line))
162
+ return sents
163
+
164
+ def _read_word_block(self, stream):
165
+ words = []
166
+ for sent in self._read_sent_block(stream):
167
+ words.extend(sent)
168
+ return words
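A minimal sketch of constructing this reader directly for the Subjectivity Dataset; the local directory name, file names, and category map below are illustrative assumptions rather than anything fixed by this module.

from nltk.corpus.reader import CategorizedSentencesCorpusReader

reader = CategorizedSentencesCorpusReader(
    "rotten_imdb",                                   # hypothetical local directory
    r"(quote|plot)\.tok\.gt9\.5000",
    cat_map={"quote.tok.gt9.5000": ["subj"], "plot.tok.gt9.5000": ["obj"]},
    encoding="latin-1",
)
print(reader.categories())                 # expected: ['obj', 'subj']
print(reader.sents(categories="subj")[0])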
lib/python3.10/site-packages/nltk/corpus/reader/chasen.py ADDED
@@ -0,0 +1,158 @@
1
+ #
2
+ # Copyright (C) 2001-2023 NLTK Project
3
+ # Author: Masato Hagiwara <[email protected]>
4
+ # URL: <https://www.nltk.org/>
5
+ # For license information, see LICENSE.TXT
6
+
7
+ import sys
8
+
9
+ from nltk.corpus.reader import util
10
+ from nltk.corpus.reader.api import *
11
+ from nltk.corpus.reader.util import *
12
+
13
+
14
+ class ChasenCorpusReader(CorpusReader):
15
+ def __init__(self, root, fileids, encoding="utf8", sent_splitter=None):
16
+ self._sent_splitter = sent_splitter
17
+ CorpusReader.__init__(self, root, fileids, encoding)
18
+
19
+ def words(self, fileids=None):
20
+ return concat(
21
+ [
22
+ ChasenCorpusView(fileid, enc, False, False, False, self._sent_splitter)
23
+ for (fileid, enc) in self.abspaths(fileids, True)
24
+ ]
25
+ )
26
+
27
+ def tagged_words(self, fileids=None):
28
+ return concat(
29
+ [
30
+ ChasenCorpusView(fileid, enc, True, False, False, self._sent_splitter)
31
+ for (fileid, enc) in self.abspaths(fileids, True)
32
+ ]
33
+ )
34
+
35
+ def sents(self, fileids=None):
36
+ return concat(
37
+ [
38
+ ChasenCorpusView(fileid, enc, False, True, False, self._sent_splitter)
39
+ for (fileid, enc) in self.abspaths(fileids, True)
40
+ ]
41
+ )
42
+
43
+ def tagged_sents(self, fileids=None):
44
+ return concat(
45
+ [
46
+ ChasenCorpusView(fileid, enc, True, True, False, self._sent_splitter)
47
+ for (fileid, enc) in self.abspaths(fileids, True)
48
+ ]
49
+ )
50
+
51
+ def paras(self, fileids=None):
52
+ return concat(
53
+ [
54
+ ChasenCorpusView(fileid, enc, False, True, True, self._sent_splitter)
55
+ for (fileid, enc) in self.abspaths(fileids, True)
56
+ ]
57
+ )
58
+
59
+ def tagged_paras(self, fileids=None):
60
+ return concat(
61
+ [
62
+ ChasenCorpusView(fileid, enc, True, True, True, self._sent_splitter)
63
+ for (fileid, enc) in self.abspaths(fileids, True)
64
+ ]
65
+ )
66
+
67
+
68
+ class ChasenCorpusView(StreamBackedCorpusView):
69
+ """
70
+ A specialized corpus view for ChasenCorpusReader. Similar to ``TaggedCorpusView``,
71
+ but it uses a fixed word and sentence tokenization scheme.
72
+ """
73
+
74
+ def __init__(
75
+ self,
76
+ corpus_file,
77
+ encoding,
78
+ tagged,
79
+ group_by_sent,
80
+ group_by_para,
81
+ sent_splitter=None,
82
+ ):
83
+ self._tagged = tagged
84
+ self._group_by_sent = group_by_sent
85
+ self._group_by_para = group_by_para
86
+ self._sent_splitter = sent_splitter
87
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
88
+
89
+ def read_block(self, stream):
90
+ """Reads one paragraph at a time."""
91
+ block = []
92
+ for para_str in read_regexp_block(stream, r".", r"^EOS\n"):
93
+
94
+ para = []
95
+
96
+ sent = []
97
+ for line in para_str.splitlines():
98
+
99
+ _eos = line.strip() == "EOS"
100
+ _cells = line.split("\t")
101
+ w = (_cells[0], "\t".join(_cells[1:]))
102
+ if not _eos:
103
+ sent.append(w)
104
+
105
+ if _eos or (self._sent_splitter and self._sent_splitter(w)):
106
+ if not self._tagged:
107
+ sent = [w for (w, t) in sent]
108
+ if self._group_by_sent:
109
+ para.append(sent)
110
+ else:
111
+ para.extend(sent)
112
+ sent = []
113
+
114
+ if len(sent) > 0:
115
+ if not self._tagged:
116
+ sent = [w for (w, t) in sent]
117
+
118
+ if self._group_by_sent:
119
+ para.append(sent)
120
+ else:
121
+ para.extend(sent)
122
+
123
+ if self._group_by_para:
124
+ block.append(para)
125
+ else:
126
+ block.extend(para)
127
+
128
+ return block
129
+
130
+
131
+ def demo():
132
+
133
+ import nltk
134
+ from nltk.corpus.util import LazyCorpusLoader
135
+
136
+ jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
137
+ print("/".join(jeita.words()[22100:22140]))
138
+
139
+ print(
140
+ "\nEOS\n".join(
141
+ "\n".join("{}/{}".format(w[0], w[1].split("\t")[2]) for w in sent)
142
+ for sent in jeita.tagged_sents()[2170:2173]
143
+ )
144
+ )
145
+
146
+
147
+ def test():
148
+
149
+ from nltk.corpus.util import LazyCorpusLoader
150
+
151
+ jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
152
+
153
+ assert isinstance(jeita.tagged_words()[0][1], str)
154
+
155
+
156
+ if __name__ == "__main__":
157
+ demo()
158
+ test()
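A sketch of the optional sent_splitter hook, assuming the JEITA corpus is installed; the splitter below (break after the Japanese full stop) is only one plausible heuristic, not something this module mandates.

from nltk.corpus.reader.chasen import ChasenCorpusReader
from nltk.corpus.util import LazyCorpusLoader

jeita = LazyCorpusLoader(
    "jeita",
    ChasenCorpusReader,
    r".*chasen",
    encoding="utf-8",
    sent_splitter=lambda w: w[0] == "。",   # w is a (surface, tag-string) pair
)
print(jeita.sents()[0])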
lib/python3.10/site-packages/nltk/corpus/reader/chunked.py ADDED
@@ -0,0 +1,273 @@
1
+ # Natural Language Toolkit: Chunked Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A reader for corpora that contain chunked (and optionally tagged)
11
+ documents.
12
+ """
13
+
14
+ import codecs
15
+ import os.path
16
+
17
+ import nltk
18
+ from nltk.chunk import tagstr2tree
19
+ from nltk.corpus.reader.api import *
20
+ from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
21
+ from nltk.corpus.reader.util import *
22
+ from nltk.tokenize import *
23
+ from nltk.tree import Tree
24
+
25
+
26
+ class ChunkedCorpusReader(CorpusReader):
27
+ """
28
+ Reader for chunked (and optionally tagged) corpora. Paragraphs
29
+ are split using a block reader. They are then tokenized into
30
+ sentences using a sentence tokenizer. Finally, these sentences
31
+ are parsed into chunk trees using a string-to-chunktree conversion
32
+ function. Each of these steps can be performed using a default
33
+ function or a custom function. By default, paragraphs are split
34
+ on blank lines; sentences are listed one per line; and sentences
35
+ are parsed into chunk trees using ``nltk.chunk.tagstr2tree``.
36
+ """
37
+
38
+ def __init__(
39
+ self,
40
+ root,
41
+ fileids,
42
+ extension="",
43
+ str2chunktree=tagstr2tree,
44
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
45
+ para_block_reader=read_blankline_block,
46
+ encoding="utf8",
47
+ tagset=None,
48
+ ):
49
+ """
50
+ :param root: The root directory for this corpus.
51
+ :param fileids: A list or regexp specifying the fileids in this corpus.
52
+ """
53
+ CorpusReader.__init__(self, root, fileids, encoding)
54
+ self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader, tagset)
55
+ """Arguments for corpus views generated by this corpus: a tuple
56
+ (str2chunktree, sent_tokenizer, para_block_tokenizer)"""
57
+
58
+ def words(self, fileids=None):
59
+ """
60
+ :return: the given file(s) as a list of words
61
+ and punctuation symbols.
62
+ :rtype: list(str)
63
+ """
64
+ return concat(
65
+ [
66
+ ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args)
67
+ for (f, enc) in self.abspaths(fileids, True)
68
+ ]
69
+ )
70
+
71
+ def sents(self, fileids=None):
72
+ """
73
+ :return: the given file(s) as a list of
74
+ sentences or utterances, each encoded as a list of word
75
+ strings.
76
+ :rtype: list(list(str))
77
+ """
78
+ return concat(
79
+ [
80
+ ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args)
81
+ for (f, enc) in self.abspaths(fileids, True)
82
+ ]
83
+ )
84
+
85
+ def paras(self, fileids=None):
86
+ """
87
+ :return: the given file(s) as a list of
88
+ paragraphs, each encoded as a list of sentences, which are
89
+ in turn encoded as lists of word strings.
90
+ :rtype: list(list(list(str)))
91
+ """
92
+ return concat(
93
+ [
94
+ ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args)
95
+ for (f, enc) in self.abspaths(fileids, True)
96
+ ]
97
+ )
98
+
99
+ def tagged_words(self, fileids=None, tagset=None):
100
+ """
101
+ :return: the given file(s) as a list of tagged
102
+ words and punctuation symbols, encoded as tuples
103
+ ``(word,tag)``.
104
+ :rtype: list(tuple(str,str))
105
+ """
106
+ return concat(
107
+ [
108
+ ChunkedCorpusView(
109
+ f, enc, 1, 0, 0, 0, *self._cv_args, target_tagset=tagset
110
+ )
111
+ for (f, enc) in self.abspaths(fileids, True)
112
+ ]
113
+ )
114
+
115
+ def tagged_sents(self, fileids=None, tagset=None):
116
+ """
117
+ :return: the given file(s) as a list of
118
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
119
+
120
+ :rtype: list(list(tuple(str,str)))
121
+ """
122
+ return concat(
123
+ [
124
+ ChunkedCorpusView(
125
+ f, enc, 1, 1, 0, 0, *self._cv_args, target_tagset=tagset
126
+ )
127
+ for (f, enc) in self.abspaths(fileids, True)
128
+ ]
129
+ )
130
+
131
+ def tagged_paras(self, fileids=None, tagset=None):
132
+ """
133
+ :return: the given file(s) as a list of
134
+ paragraphs, each encoded as a list of sentences, which are
135
+ in turn encoded as lists of ``(word,tag)`` tuples.
136
+ :rtype: list(list(list(tuple(str,str))))
137
+ """
138
+ return concat(
139
+ [
140
+ ChunkedCorpusView(
141
+ f, enc, 1, 1, 1, 0, *self._cv_args, target_tagset=tagset
142
+ )
143
+ for (f, enc) in self.abspaths(fileids, True)
144
+ ]
145
+ )
146
+
147
+ def chunked_words(self, fileids=None, tagset=None):
148
+ """
149
+ :return: the given file(s) as a list of tagged
150
+ words and chunks. Words are encoded as ``(word, tag)``
151
+ tuples (if the corpus has tags) or word strings (if the
152
+ corpus has no tags). Chunks are encoded as depth-one
153
+ trees over ``(word,tag)`` tuples or word strings.
154
+ :rtype: list(tuple(str,str) and Tree)
155
+ """
156
+ return concat(
157
+ [
158
+ ChunkedCorpusView(
159
+ f, enc, 1, 0, 0, 1, *self._cv_args, target_tagset=tagset
160
+ )
161
+ for (f, enc) in self.abspaths(fileids, True)
162
+ ]
163
+ )
164
+
165
+ def chunked_sents(self, fileids=None, tagset=None):
166
+ """
167
+ :return: the given file(s) as a list of
168
+ sentences, each encoded as a shallow Tree. The leaves
169
+ of these trees are encoded as ``(word, tag)`` tuples (if
170
+ the corpus has tags) or word strings (if the corpus has no
171
+ tags).
172
+ :rtype: list(Tree)
173
+ """
174
+ return concat(
175
+ [
176
+ ChunkedCorpusView(
177
+ f, enc, 1, 1, 0, 1, *self._cv_args, target_tagset=tagset
178
+ )
179
+ for (f, enc) in self.abspaths(fileids, True)
180
+ ]
181
+ )
182
+
183
+ def chunked_paras(self, fileids=None, tagset=None):
184
+ """
185
+ :return: the given file(s) as a list of
186
+ paragraphs, each encoded as a list of sentences, which are
187
+ in turn encoded as a shallow Tree. The leaves of these
188
+ trees are encoded as ``(word, tag)`` tuples (if the corpus
189
+ has tags) or word strings (if the corpus has no tags).
190
+ :rtype: list(list(Tree))
191
+ """
192
+ return concat(
193
+ [
194
+ ChunkedCorpusView(
195
+ f, enc, 1, 1, 1, 1, *self._cv_args, target_tagset=tagset
196
+ )
197
+ for (f, enc) in self.abspaths(fileids, True)
198
+ ]
199
+ )
200
+
201
+ def _read_block(self, stream):
202
+ return [tagstr2tree(t) for t in read_blankline_block(stream)]
203
+
204
+
205
+ class ChunkedCorpusView(StreamBackedCorpusView):
206
+ def __init__(
207
+ self,
208
+ fileid,
209
+ encoding,
210
+ tagged,
211
+ group_by_sent,
212
+ group_by_para,
213
+ chunked,
214
+ str2chunktree,
215
+ sent_tokenizer,
216
+ para_block_reader,
217
+ source_tagset=None,
218
+ target_tagset=None,
219
+ ):
220
+ StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
221
+ self._tagged = tagged
222
+ self._group_by_sent = group_by_sent
223
+ self._group_by_para = group_by_para
224
+ self._chunked = chunked
225
+ self._str2chunktree = str2chunktree
226
+ self._sent_tokenizer = sent_tokenizer
227
+ self._para_block_reader = para_block_reader
228
+ self._source_tagset = source_tagset
229
+ self._target_tagset = target_tagset
230
+
231
+ def read_block(self, stream):
232
+ block = []
233
+ for para_str in self._para_block_reader(stream):
234
+ para = []
235
+ for sent_str in self._sent_tokenizer.tokenize(para_str):
236
+ sent = self._str2chunktree(
237
+ sent_str,
238
+ source_tagset=self._source_tagset,
239
+ target_tagset=self._target_tagset,
240
+ )
241
+
242
+ # If requested, throw away the tags.
243
+ if not self._tagged:
244
+ sent = self._untag(sent)
245
+
246
+ # If requested, throw away the chunks.
247
+ if not self._chunked:
248
+ sent = sent.leaves()
249
+
250
+ # Add the sentence to `para`.
251
+ if self._group_by_sent:
252
+ para.append(sent)
253
+ else:
254
+ para.extend(sent)
255
+
256
+ # Add the paragraph to `block`.
257
+ if self._group_by_para:
258
+ block.append(para)
259
+ else:
260
+ block.extend(para)
261
+
262
+ # Return the block
263
+ return block
264
+
265
+ def _untag(self, tree):
266
+ for i, child in enumerate(tree):
267
+ if isinstance(child, Tree):
268
+ self._untag(child)
269
+ elif isinstance(child, tuple):
270
+ tree[i] = child[0]
271
+ else:
272
+ raise ValueError("expected child to be Tree or tuple")
273
+ return tree
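A sketch of pointing this reader at a bracket-chunked corpus; the directory and filename pattern are hypothetical, and the sample line shows the tagstr2tree format that the default str2chunktree expects.

from nltk.corpus.reader import ChunkedCorpusReader

# One sentence per line, chunks in square brackets, e.g.:
#   [ The/DT little/JJ cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] ./.
reader = ChunkedCorpusReader("/path/to/chunked_corpus", r".*\.chunk")
# for tree in reader.chunked_sents():
#     print(tree)   # a shallow Tree with NP chunks over (word, tag) leaves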
lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py ADDED
@@ -0,0 +1,309 @@
1
+ # Natural Language Toolkit: Comparative Sentence Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Pierpaolo Pantone <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for the Comparative Sentence Dataset.
10
+
11
+ - Comparative Sentence Dataset information -
12
+
13
+ Annotated by: Nitin Jindal and Bing Liu, 2006.
14
+ Department of Computer Science
15
+ University of Illinois at Chicago
16
+
17
+ Contact: Nitin Jindal, [email protected]
18
+ Bing Liu, [email protected] (https://www.cs.uic.edu/~liub)
19
+
20
+ Distributed with permission.
21
+
22
+ Related papers:
23
+
24
+ - Nitin Jindal and Bing Liu. "Identifying Comparative Sentences in Text Documents".
25
+ Proceedings of the ACM SIGIR International Conference on Information Retrieval
26
+ (SIGIR-06), 2006.
27
+
28
+ - Nitin Jindal and Bing Liu. "Mining Comparative Sentences and Relations".
29
+ Proceedings of Twenty First National Conference on Artificial Intelligence
30
+ (AAAI-2006), 2006.
31
+
32
+ - Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences".
33
+ Proceedings of the 22nd International Conference on Computational Linguistics
34
+ (Coling-2008), Manchester, 18-22 August, 2008.
35
+ """
36
+ import re
37
+
38
+ from nltk.corpus.reader.api import *
39
+ from nltk.tokenize import *
40
+
41
+ # Regular expressions for dataset components
42
+ STARS = re.compile(r"^\*+$")
43
+ COMPARISON = re.compile(r"<cs-[1234]>")
44
+ CLOSE_COMPARISON = re.compile(r"</cs-[1234]>")
45
+ GRAD_COMPARISON = re.compile(r"<cs-[123]>")
46
+ NON_GRAD_COMPARISON = re.compile(r"<cs-4>")
47
+ ENTITIES_FEATS = re.compile(r"(\d)_((?:[\.\w\s/-](?!\d_))+)")
48
+ KEYWORD = re.compile(r"\(([^\(]*)\)$")
49
+
50
+
51
+ class Comparison:
52
+ """
53
+ A Comparison represents a comparative sentence and its constituents.
54
+ """
55
+
56
+ def __init__(
57
+ self,
58
+ text=None,
59
+ comp_type=None,
60
+ entity_1=None,
61
+ entity_2=None,
62
+ feature=None,
63
+ keyword=None,
64
+ ):
65
+ """
66
+ :param text: a string (optionally tokenized) containing a comparison.
67
+ :param comp_type: an integer defining the type of comparison expressed.
68
+ Values can be: 1 (Non-equal gradable), 2 (Equative), 3 (Superlative),
69
+ 4 (Non-gradable).
70
+ :param entity_1: the first entity considered in the comparison relation.
71
+ :param entity_2: the second entity considered in the comparison relation.
72
+ :param feature: the feature considered in the comparison relation.
73
+ :param keyword: the word or phrase which is used for that comparative relation.
74
+ """
75
+ self.text = text
76
+ self.comp_type = comp_type
77
+ self.entity_1 = entity_1
78
+ self.entity_2 = entity_2
79
+ self.feature = feature
80
+ self.keyword = keyword
81
+
82
+ def __repr__(self):
83
+ return (
84
+ 'Comparison(text="{}", comp_type={}, entity_1="{}", entity_2="{}", '
85
+ 'feature="{}", keyword="{}")'
86
+ ).format(
87
+ self.text,
88
+ self.comp_type,
89
+ self.entity_1,
90
+ self.entity_2,
91
+ self.feature,
92
+ self.keyword,
93
+ )
94
+
95
+
96
+ class ComparativeSentencesCorpusReader(CorpusReader):
97
+ """
98
+ Reader for the Comparative Sentence Dataset by Jindal and Liu (2006).
99
+
100
+ >>> from nltk.corpus import comparative_sentences
101
+ >>> comparison = comparative_sentences.comparisons()[0]
102
+ >>> comparison.text # doctest: +NORMALIZE_WHITESPACE
103
+ ['its', 'fast-forward', 'and', 'rewind', 'work', 'much', 'more', 'smoothly',
104
+ 'and', 'consistently', 'than', 'those', 'of', 'other', 'models', 'i', "'ve",
105
+ 'had', '.']
106
+ >>> comparison.entity_2
107
+ 'models'
108
+ >>> (comparison.feature, comparison.keyword)
109
+ ('rewind', 'more')
110
+ >>> len(comparative_sentences.comparisons())
111
+ 853
112
+ """
113
+
114
+ CorpusView = StreamBackedCorpusView
115
+
116
+ def __init__(
117
+ self,
118
+ root,
119
+ fileids,
120
+ word_tokenizer=WhitespaceTokenizer(),
121
+ sent_tokenizer=None,
122
+ encoding="utf8",
123
+ ):
124
+ """
125
+ :param root: The root directory for this corpus.
126
+ :param fileids: a list or regexp specifying the fileids in this corpus.
127
+ :param word_tokenizer: tokenizer for breaking sentences or paragraphs
128
+ into words. Default: `WhitespaceTokenizer`
129
+ :param sent_tokenizer: tokenizer for breaking paragraphs into sentences.
130
+ :param encoding: the encoding that should be used to read the corpus.
131
+ """
132
+
133
+ CorpusReader.__init__(self, root, fileids, encoding)
134
+ self._word_tokenizer = word_tokenizer
135
+ self._sent_tokenizer = sent_tokenizer
136
+ self._readme = "README.txt"
137
+
138
+ def comparisons(self, fileids=None):
139
+ """
140
+ Return all comparisons in the corpus.
141
+
142
+ :param fileids: a list or regexp specifying the ids of the files whose
143
+ comparisons have to be returned.
144
+ :return: the given file(s) as a list of Comparison objects.
145
+ :rtype: list(Comparison)
146
+ """
147
+ if fileids is None:
148
+ fileids = self._fileids
149
+ elif isinstance(fileids, str):
150
+ fileids = [fileids]
151
+ return concat(
152
+ [
153
+ self.CorpusView(path, self._read_comparison_block, encoding=enc)
154
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
155
+ ]
156
+ )
157
+
158
+ def keywords(self, fileids=None):
159
+ """
160
+ Return a set of all keywords used in the corpus.
161
+
162
+ :param fileids: a list or regexp specifying the ids of the files whose
163
+ keywords have to be returned.
164
+ :return: the set of keywords and comparative phrases used in the corpus.
165
+ :rtype: set(str)
166
+ """
167
+ all_keywords = concat(
168
+ [
169
+ self.CorpusView(path, self._read_keyword_block, encoding=enc)
170
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
171
+ ]
172
+ )
173
+
174
+ keywords_set = {keyword.lower() for keyword in all_keywords if keyword}
175
+ return keywords_set
176
+
177
+ def keywords_readme(self):
178
+ """
179
+ Return the list of words and constituents considered as clues of a
180
+ comparison (from listOfkeywords.txt).
181
+ """
182
+ keywords = []
183
+ with self.open("listOfkeywords.txt") as fp:
184
+ raw_text = fp.read()
185
+ for line in raw_text.split("\n"):
186
+ if not line or line.startswith("//"):
187
+ continue
188
+ keywords.append(line.strip())
189
+ return keywords
190
+
191
+ def sents(self, fileids=None):
192
+ """
193
+ Return all sentences in the corpus.
194
+
195
+ :param fileids: a list or regexp specifying the ids of the files whose
196
+ sentences have to be returned.
197
+ :return: all sentences of the corpus as lists of tokens (or as plain
198
+ strings, if no word tokenizer is specified).
199
+ :rtype: list(list(str)) or list(str)
200
+ """
201
+ return concat(
202
+ [
203
+ self.CorpusView(path, self._read_sent_block, encoding=enc)
204
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
205
+ ]
206
+ )
207
+
208
+ def words(self, fileids=None):
209
+ """
210
+ Return all words and punctuation symbols in the corpus.
211
+
212
+ :param fileids: a list or regexp specifying the ids of the files whose
213
+ words have to be returned.
214
+ :return: the given file(s) as a list of words and punctuation symbols.
215
+ :rtype: list(str)
216
+ """
217
+ return concat(
218
+ [
219
+ self.CorpusView(path, self._read_word_block, encoding=enc)
220
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
221
+ ]
222
+ )
223
+
224
+ def _read_comparison_block(self, stream):
225
+ while True:
226
+ line = stream.readline()
227
+ if not line:
228
+ return [] # end of file.
229
+ comparison_tags = re.findall(COMPARISON, line)
230
+ if comparison_tags:
231
+ grad_comparisons = re.findall(GRAD_COMPARISON, line)
232
+ non_grad_comparisons = re.findall(NON_GRAD_COMPARISON, line)
233
+ # Advance to the next line (it contains the comparative sentence)
234
+ comparison_text = stream.readline().strip()
235
+ if self._word_tokenizer:
236
+ comparison_text = self._word_tokenizer.tokenize(comparison_text)
237
+ # Skip the next line (it contains closing comparison tags)
238
+ stream.readline()
239
+ # If gradable comparisons are found, create Comparison instances
240
+ # and populate their fields
241
+ comparison_bundle = []
242
+ if grad_comparisons:
243
+ # Each comparison tag has its own relations on a separate line
244
+ for comp in grad_comparisons:
245
+ comp_type = int(re.match(r"<cs-(\d)>", comp).group(1))
246
+ comparison = Comparison(
247
+ text=comparison_text, comp_type=comp_type
248
+ )
249
+ line = stream.readline()
250
+ entities_feats = ENTITIES_FEATS.findall(line)
251
+ if entities_feats:
252
+ for (code, entity_feat) in entities_feats:
253
+ if code == "1":
254
+ comparison.entity_1 = entity_feat.strip()
255
+ elif code == "2":
256
+ comparison.entity_2 = entity_feat.strip()
257
+ elif code == "3":
258
+ comparison.feature = entity_feat.strip()
259
+ keyword = KEYWORD.findall(line)
260
+ if keyword:
261
+ comparison.keyword = keyword[0]
262
+ comparison_bundle.append(comparison)
263
+ # If non-gradable comparisons are found, create a simple Comparison
264
+ # instance for each one
265
+ if non_grad_comparisons:
266
+ for comp in non_grad_comparisons:
267
+ # comp_type in this case should always be 4.
268
+ comp_type = int(re.match(r"<cs-(\d)>", comp).group(1))
269
+ comparison = Comparison(
270
+ text=comparison_text, comp_type=comp_type
271
+ )
272
+ comparison_bundle.append(comparison)
273
+ # Flatten the list of comparisons before returning them
274
+ # return concat([comparison_bundle])
275
+ return comparison_bundle
276
+
277
+ def _read_keyword_block(self, stream):
278
+ keywords = []
279
+ for comparison in self._read_comparison_block(stream):
280
+ keywords.append(comparison.keyword)
281
+ return keywords
282
+
283
+ def _read_sent_block(self, stream):
284
+ while True:
285
+ line = stream.readline()
286
+ if re.match(STARS, line):
287
+ while True:
288
+ line = stream.readline()
289
+ if re.match(STARS, line):
290
+ break
291
+ continue
292
+ if (
293
+ not re.findall(COMPARISON, line)
294
+ and not ENTITIES_FEATS.findall(line)
295
+ and not re.findall(CLOSE_COMPARISON, line)
296
+ ):
297
+ if self._sent_tokenizer:
298
+ return [
299
+ self._word_tokenizer.tokenize(sent)
300
+ for sent in self._sent_tokenizer.tokenize(line)
301
+ ]
302
+ else:
303
+ return [self._word_tokenizer.tokenize(line)]
304
+
305
+ def _read_word_block(self, stream):
306
+ words = []
307
+ for sent in self._read_sent_block(stream):
308
+ words.extend(sent)
309
+ return words
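To make the module-level regexes above concrete, here is a small standalone illustration; the annotation line is made up in the documented format, not copied from the corpus.

import re

ENTITIES_FEATS = re.compile(r"(\d)_((?:[\.\w\s/-](?!\d_))+)")
KEYWORD = re.compile(r"\(([^\(]*)\)$")

line = "1_camera 2_phone 3_picture quality (better)"
print(ENTITIES_FEATS.findall(line))  # [('1', 'camera'), ('2', 'phone'), ('3', 'picture quality ')]
print(KEYWORD.findall(line))         # ['better']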
lib/python3.10/site-packages/nltk/corpus/reader/dependency.py ADDED
@@ -0,0 +1,115 @@
1
+ # Natural Language Toolkit: Dependency Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Kepa Sarasola <[email protected]>
5
+ # Iker Manterola <[email protected]>
6
+ #
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ from nltk.corpus.reader.api import *
11
+ from nltk.corpus.reader.util import *
12
+ from nltk.parse import DependencyGraph
13
+ from nltk.tokenize import *
14
+
15
+
16
+ class DependencyCorpusReader(SyntaxCorpusReader):
17
+ def __init__(
18
+ self,
19
+ root,
20
+ fileids,
21
+ encoding="utf8",
22
+ word_tokenizer=TabTokenizer(),
23
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
24
+ para_block_reader=read_blankline_block,
25
+ ):
26
+ SyntaxCorpusReader.__init__(self, root, fileids, encoding)
27
+
28
+ #########################################################
29
+
30
+ def words(self, fileids=None):
31
+ return concat(
32
+ [
33
+ DependencyCorpusView(fileid, False, False, False, encoding=enc)
34
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
35
+ ]
36
+ )
37
+
38
+ def tagged_words(self, fileids=None):
39
+ return concat(
40
+ [
41
+ DependencyCorpusView(fileid, True, False, False, encoding=enc)
42
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
43
+ ]
44
+ )
45
+
46
+ def sents(self, fileids=None):
47
+ return concat(
48
+ [
49
+ DependencyCorpusView(fileid, False, True, False, encoding=enc)
50
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
51
+ ]
52
+ )
53
+
54
+ def tagged_sents(self, fileids=None):
55
+ return concat(
56
+ [
57
+ DependencyCorpusView(fileid, True, True, False, encoding=enc)
58
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
59
+ ]
60
+ )
61
+
62
+ def parsed_sents(self, fileids=None):
63
+ sents = concat(
64
+ [
65
+ DependencyCorpusView(fileid, False, True, True, encoding=enc)
66
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
67
+ ]
68
+ )
69
+ return [DependencyGraph(sent) for sent in sents]
70
+
71
+
72
+ class DependencyCorpusView(StreamBackedCorpusView):
73
+ _DOCSTART = "-DOCSTART- -DOCSTART- O\n" # dokumentu hasiera definitzen da
74
+
75
+ def __init__(
76
+ self,
77
+ corpus_file,
78
+ tagged,
79
+ group_by_sent,
80
+ dependencies,
81
+ chunk_types=None,
82
+ encoding="utf8",
83
+ ):
84
+ self._tagged = tagged
85
+ self._dependencies = dependencies
86
+ self._group_by_sent = group_by_sent
87
+ self._chunk_types = chunk_types
88
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
89
+
90
+ def read_block(self, stream):
91
+ # Read the next sentence.
92
+ sent = read_blankline_block(stream)[0].strip()
93
+ # Strip off the docstart marker, if present.
94
+ if sent.startswith(self._DOCSTART):
95
+ sent = sent[len(self._DOCSTART) :].lstrip()
96
+
97
+ # extract word and tag from any of the formats
98
+ if not self._dependencies:
99
+ lines = [line.split("\t") for line in sent.split("\n")]
100
+ if len(lines[0]) == 3 or len(lines[0]) == 4:
101
+ sent = [(line[0], line[1]) for line in lines]
102
+ elif len(lines[0]) == 10:
103
+ sent = [(line[1], line[4]) for line in lines]
104
+ else:
105
+ raise ValueError("Unexpected number of fields in dependency tree file")
106
+
107
+ # discard tags if they weren't requested
108
+ if not self._tagged:
109
+ sent = [word for (word, tag) in sent]
110
+
111
+ # Return the result.
112
+ if self._group_by_sent:
113
+ return [sent]
114
+ else:
115
+ return list(sent)
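A sketch of reading CoNLL-style dependency files with the reader above; the path and extension are hypothetical, but the column handling follows read_block (3/4-column or 10-column lines, blank-line-separated sentences).

from nltk.corpus.reader import DependencyCorpusReader

reader = DependencyCorpusReader("/path/to/conll_corpus", r".*\.conll")
# tagged_sents() pulls (word, tag) from columns 2 and 5 of 10-column lines;
# parsed_sents() hands each blank-line-separated block to DependencyGraph.
# print(reader.tagged_sents()[0])
# print(reader.parsed_sents()[0].tree())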
lib/python3.10/site-packages/nltk/corpus/reader/framenet.py ADDED
The diff for this file is too large to render. See raw diff
 
lib/python3.10/site-packages/nltk/corpus/reader/knbc.py ADDED
@@ -0,0 +1,188 @@
1
+ #! /usr/bin/env python
2
+ # KNB Corpus reader
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Masato Hagiwara <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # For more information, see http://lilyx.net/pages/nltkjapanesecorpus.html
9
+
10
+ import re
11
+
12
+ from nltk.corpus.reader.api import CorpusReader, SyntaxCorpusReader
13
+ from nltk.corpus.reader.util import (
14
+ FileSystemPathPointer,
15
+ find_corpus_fileids,
16
+ read_blankline_block,
17
+ )
18
+ from nltk.parse import DependencyGraph
19
+
20
+ # default function to convert morphlist to str for tree representation
21
+ _morphs2str_default = lambda morphs: "/".join(m[0] for m in morphs if m[0] != "EOS")
22
+
23
+
24
+ class KNBCorpusReader(SyntaxCorpusReader):
25
+ """
26
+ This class implements:
27
+ - ``__init__``, which specifies the location of the corpus
28
+ and a method for detecting the sentence blocks in corpus files.
29
+ - ``_read_block``, which reads a block from the input stream.
30
+ - ``_word``, which takes a block and returns a list of list of words.
31
+ - ``_tag``, which takes a block and returns a list of list of tagged
32
+ words.
33
+ - ``_parse``, which takes a block and returns a list of parsed
34
+ sentences.
35
+
36
+ The structure of tagged words:
37
+ tagged_word = (word(str), tags(tuple))
38
+ tags = (surface, reading, lemma, pos1, posid1, pos2, posid2, pos3, posid3, others ...)
39
+
40
+ Usage example
41
+
42
+ >>> from nltk.corpus.util import LazyCorpusLoader
43
+ >>> knbc = LazyCorpusLoader(
44
+ ... 'knbc/corpus1',
45
+ ... KNBCorpusReader,
46
+ ... r'.*/KN.*',
47
+ ... encoding='euc-jp',
48
+ ... )
49
+
50
+ >>> len(knbc.sents()[0])
51
+ 9
52
+
53
+ """
54
+
55
+ def __init__(self, root, fileids, encoding="utf8", morphs2str=_morphs2str_default):
56
+ """
57
+ Initialize KNBCorpusReader
58
+ morphs2str is a function to convert morphlist to str for tree representation
59
+ for _parse()
60
+ """
61
+ SyntaxCorpusReader.__init__(self, root, fileids, encoding)
62
+ self.morphs2str = morphs2str
63
+
64
+ def _read_block(self, stream):
65
+ # blocks are split by blankline (or EOF) - default
66
+ return read_blankline_block(stream)
67
+
68
+ def _word(self, t):
69
+ res = []
70
+ for line in t.splitlines():
71
+ # ignore the bunsetsu header lines
72
+ if not re.match(r"EOS|\*|\#|\+", line):
73
+ cells = line.strip().split(" ")
74
+ res.append(cells[0])
75
+
76
+ return res
77
+
78
+ # ignores tagset argument
79
+ def _tag(self, t, tagset=None):
80
+ res = []
81
+ for line in t.splitlines():
82
+ # ignore the bunsetsu header lines
83
+ if not re.match(r"EOS|\*|\#|\+", line):
84
+ cells = line.strip().split(" ")
85
+ # convert cells to morph tuples
86
+ res.append((cells[0], " ".join(cells[1:])))
87
+
88
+ return res
89
+
90
+ def _parse(self, t):
91
+ dg = DependencyGraph()
92
+ i = 0
93
+ for line in t.splitlines():
94
+ if line[0] in "*+":
95
+ # start of bunsetsu or tag
96
+
97
+ cells = line.strip().split(" ", 3)
98
+ m = re.match(r"([\-0-9]*)([ADIP])", cells[1])
99
+
100
+ assert m is not None
101
+
102
+ node = dg.nodes[i]
103
+ node.update({"address": i, "rel": m.group(2), "word": []})
104
+
105
+ dep_parent = int(m.group(1))
106
+
107
+ if dep_parent == -1:
108
+ dg.root = node
109
+ else:
110
+ dg.nodes[dep_parent]["deps"].append(i)
111
+
112
+ i += 1
113
+ elif line[0] != "#":
114
+ # normal morph
115
+ cells = line.strip().split(" ")
116
+ # convert cells to morph tuples
117
+ morph = cells[0], " ".join(cells[1:])
118
+ dg.nodes[i - 1]["word"].append(morph)
119
+
120
+ if self.morphs2str:
121
+ for node in dg.nodes.values():
122
+ node["word"] = self.morphs2str(node["word"])
123
+
124
+ return dg.tree()
125
+
126
+
127
+ ######################################################################
128
+ # Demo
129
+ ######################################################################
130
+
131
+
132
+ def demo():
133
+
134
+ import nltk
135
+ from nltk.corpus.util import LazyCorpusLoader
136
+
137
+ root = nltk.data.find("corpora/knbc/corpus1")
138
+ fileids = [
139
+ f
140
+ for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
141
+ if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)
142
+ ]
143
+
144
+ def _knbc_fileids_sort(x):
145
+ cells = x.split("-")
146
+ return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))
147
+
148
+ knbc = LazyCorpusLoader(
149
+ "knbc/corpus1",
150
+ KNBCorpusReader,
151
+ sorted(fileids, key=_knbc_fileids_sort),
152
+ encoding="euc-jp",
153
+ )
154
+
155
+ print(knbc.fileids()[:10])
156
+ print("".join(knbc.words()[:100]))
157
+
158
+ print("\n\n".join(str(tree) for tree in knbc.parsed_sents()[:2]))
159
+
160
+ knbc.morphs2str = lambda morphs: "/".join(
161
+ "{}({})".format(m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS"
162
+ ).encode("utf-8")
163
+
164
+ print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2]))
165
+
166
+ print(
167
+ "\n".join(
168
+ " ".join("{}/{}".format(w[0], w[1].split(" ")[2]) for w in sent)
169
+ for sent in knbc.tagged_sents()[0:2]
170
+ )
171
+ )
172
+
173
+
174
+ def test():
175
+
176
+ from nltk.corpus.util import LazyCorpusLoader
177
+
178
+ knbc = LazyCorpusLoader(
179
+ "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp"
180
+ )
181
+ assert isinstance(knbc.words()[0], str)
182
+ assert isinstance(knbc.sents()[0][0], str)
183
+ assert isinstance(knbc.tagged_words()[0], tuple)
184
+ assert isinstance(knbc.tagged_sents()[0][0], tuple)
185
+
186
+
187
+ if __name__ == "__main__":
188
+ demo()
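A sketch of supplying a custom morphs2str at construction time instead of patching the attribute as demo() does; it assumes the knbc/corpus1 data is installed and simply reuses the formatting idea from the demo above.

from nltk.corpus.reader.knbc import KNBCorpusReader
from nltk.corpus.util import LazyCorpusLoader

knbc = LazyCorpusLoader(
    "knbc/corpus1",
    KNBCorpusReader,
    r".*/KN.*",
    encoding="euc-jp",
    morphs2str=lambda morphs: "/".join(
        "{}({})".format(m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS"
    ),
)
# print(knbc.parsed_sents()[0])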
lib/python3.10/site-packages/nltk/corpus/reader/lin.py ADDED
@@ -0,0 +1,183 @@
1
+ # Natural Language Toolkit: Lin's Thesaurus
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Dan Blanchard <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.txt
7
+
8
+ import re
9
+ from collections import defaultdict
10
+ from functools import reduce
11
+
12
+ from nltk.corpus.reader import CorpusReader
13
+
14
+
15
+ class LinThesaurusCorpusReader(CorpusReader):
16
+ """Wrapper for the LISP-formatted thesauruses distributed by Dekang Lin."""
17
+
18
+ # Compiled regular expression for extracting the key from the first line of each
19
+ # thesaurus entry
20
+ _key_re = re.compile(r'\("?([^"]+)"? \(desc [0-9.]+\).+')
21
+
22
+ @staticmethod
23
+ def __defaultdict_factory():
24
+ """Factory for creating defaultdict of defaultdict(dict)s"""
25
+ return defaultdict(dict)
26
+
27
+ def __init__(self, root, badscore=0.0):
28
+ """
29
+ Initialize the thesaurus.
30
+
31
+ :param root: root directory containing thesaurus LISP files
32
+ :type root: C{string}
33
+ :param badscore: the score to give to words which do not appear in each other's sets of synonyms
34
+ :type badscore: C{float}
35
+ """
36
+
37
+ super().__init__(root, r"sim[A-Z]\.lsp")
38
+ self._thesaurus = defaultdict(LinThesaurusCorpusReader.__defaultdict_factory)
39
+ self._badscore = badscore
40
+ for path, encoding, fileid in self.abspaths(
41
+ include_encoding=True, include_fileid=True
42
+ ):
43
+ with open(path) as lin_file:
44
+ first = True
45
+ for line in lin_file:
46
+ line = line.strip()
47
+ # Start of entry
48
+ if first:
49
+ key = LinThesaurusCorpusReader._key_re.sub(r"\1", line)
50
+ first = False
51
+ # End of entry
52
+ elif line == "))":
53
+ first = True
54
+ # Lines with pairs of ngrams and scores
55
+ else:
56
+ split_line = line.split("\t")
57
+ if len(split_line) == 2:
58
+ ngram, score = split_line
59
+ self._thesaurus[fileid][key][ngram.strip('"')] = float(
60
+ score
61
+ )
62
+
63
+ def similarity(self, ngram1, ngram2, fileid=None):
64
+ """
65
+ Returns the similarity score for two ngrams.
66
+
67
+ :param ngram1: first ngram to compare
68
+ :type ngram1: C{string}
69
+ :param ngram2: second ngram to compare
70
+ :type ngram2: C{string}
71
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
72
+ :type fileid: C{string}
73
+ :return: If fileid is specified, just the score for the two ngrams; otherwise,
74
+ list of tuples of fileids and scores.
75
+ """
76
+ # Entries don't contain themselves, so make sure similarity between item and itself is 1.0
77
+ if ngram1 == ngram2:
78
+ if fileid:
79
+ return 1.0
80
+ else:
81
+ return [(fid, 1.0) for fid in self._fileids]
82
+ else:
83
+ if fileid:
84
+ return (
85
+ self._thesaurus[fileid][ngram1][ngram2]
86
+ if ngram2 in self._thesaurus[fileid][ngram1]
87
+ else self._badscore
88
+ )
89
+ else:
90
+ return [
91
+ (
92
+ fid,
93
+ (
94
+ self._thesaurus[fid][ngram1][ngram2]
95
+ if ngram2 in self._thesaurus[fid][ngram1]
96
+ else self._badscore
97
+ ),
98
+ )
99
+ for fid in self._fileids
100
+ ]
101
+
102
+ def scored_synonyms(self, ngram, fileid=None):
103
+ """
104
+ Returns a list of scored synonyms (tuples of synonyms and scores) for the current ngram
105
+
106
+ :param ngram: ngram to lookup
107
+ :type ngram: C{string}
108
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
109
+ :type fileid: C{string}
110
+ :return: If fileid is specified, list of tuples of scores and synonyms; otherwise,
111
+ list of tuples of fileids and lists, where inner lists consist of tuples of
112
+ scores and synonyms.
113
+ """
114
+ if fileid:
115
+ return self._thesaurus[fileid][ngram].items()
116
+ else:
117
+ return [
118
+ (fileid, self._thesaurus[fileid][ngram].items())
119
+ for fileid in self._fileids
120
+ ]
121
+
122
+ def synonyms(self, ngram, fileid=None):
123
+ """
124
+ Returns a list of synonyms for the current ngram.
125
+
126
+ :param ngram: ngram to lookup
127
+ :type ngram: C{string}
128
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
129
+ :type fileid: C{string}
130
+ :return: If fileid is specified, list of synonyms; otherwise, list of tuples of fileids and
131
+ lists, where inner lists contain synonyms.
132
+ """
133
+ if fileid:
134
+ return self._thesaurus[fileid][ngram].keys()
135
+ else:
136
+ return [
137
+ (fileid, self._thesaurus[fileid][ngram].keys())
138
+ for fileid in self._fileids
139
+ ]
140
+
141
+ def __contains__(self, ngram):
142
+ """
143
+ Determines whether or not the given ngram is in the thesaurus.
144
+
145
+ :param ngram: ngram to lookup
146
+ :type ngram: C{string}
147
+ :return: whether the given ngram is in the thesaurus.
148
+ """
149
+ return reduce(
150
+ lambda accum, fileid: accum or (ngram in self._thesaurus[fileid]),
151
+ self._fileids,
152
+ False,
153
+ )
154
+
155
+
156
+ ######################################################################
157
+ # Demo
158
+ ######################################################################
159
+
160
+
161
+ def demo():
162
+ from nltk.corpus import lin_thesaurus as thes
163
+
164
+ word1 = "business"
165
+ word2 = "enterprise"
166
+ print("Getting synonyms for " + word1)
167
+ print(thes.synonyms(word1))
168
+
169
+ print("Getting scored synonyms for " + word1)
170
+ print(thes.scored_synonyms(word1))
171
+
172
+ print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
173
+ print(thes.synonyms(word1, fileid="simN.lsp"))
174
+
175
+ print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
176
+ print(thes.synonyms(word1, fileid="simN.lsp"))
177
+
178
+ print(f"Similarity score for {word1} and {word2}:")
179
+ print(thes.similarity(word1, word2))
180
+
181
+
182
+ if __name__ == "__main__":
183
+ demo()
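A short sketch of the similarity fallback and the membership test defined above, assuming the lin_thesaurus corpus is installed; the second query uses a nonsense string that is presumably absent from the thesaurus.

from nltk.corpus import lin_thesaurus as thes

print(thes.similarity("business", "enterprise", fileid="simN.lsp"))
print(thes.similarity("business", "qwzxv", fileid="simN.lsp"))  # absent pair -> badscore (0.0)
print("business" in thes)   # membership across all fileids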
lib/python3.10/site-packages/nltk/corpus/reader/markdown.py ADDED
@@ -0,0 +1,342 @@
1
+ from collections import namedtuple
2
+ from functools import partial, wraps
3
+
4
+ from nltk.corpus.reader.api import CategorizedCorpusReader
5
+ from nltk.corpus.reader.plaintext import PlaintextCorpusReader
6
+ from nltk.corpus.reader.util import concat, read_blankline_block
7
+ from nltk.tokenize import blankline_tokenize, sent_tokenize, word_tokenize
8
+
9
+
10
+ def comma_separated_string_args(func):
11
+ """
12
+ A decorator that allows a function to be called with
13
+ a single string of comma-separated values which become
14
+ individual function arguments.
15
+ """
16
+
17
+ @wraps(func)
18
+ def wrapper(*args, **kwargs):
19
+ _args = list()
20
+ for arg in args:
21
+ if isinstance(arg, str):
22
+ _args.append({part.strip() for part in arg.split(",")})
23
+ elif isinstance(arg, list):
24
+ _args.append(set(arg))
25
+ else:
26
+ _args.append(arg)
27
+ for name, value in kwargs.items():
28
+ if isinstance(value, str):
29
+ kwargs[name] = {part.strip() for part in value.split(",")}
30
+ return func(*_args, **kwargs)
31
+
32
+ return wrapper
33
+
34
+
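# A quick, self-contained illustration (not part of the reader classes below)
# of what comma_separated_string_args does with its arguments: a single
# comma-separated string, positionally or as a keyword, arrives at the wrapped
# function as a set of stripped names.
@comma_separated_string_args
def _show_categories(categories=None):
    return categories

assert _show_categories("news, fiction") == {"news", "fiction"}
assert _show_categories(categories="news,fiction") == {"news", "fiction"}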
35
+ def read_parse_blankline_block(stream, parser):
36
+ block = read_blankline_block(stream)
37
+ if block:
38
+ return [parser.render(block[0])]
39
+ return block
40
+
41
+
42
+ class MarkdownBlock:
43
+ def __init__(self, content):
44
+ self.content = content
45
+ self.truncate_at = 16
46
+
47
+ def __repr__(self):
48
+ return f"{self.__class__.__name__}(content={repr(str(self))})"
49
+
50
+ def __str__(self):
51
+ return (
52
+ f"{self.content[:self.truncate_at]}"
53
+ f"{'...' if len(self.content) > self.truncate_at else ''}"
54
+ )
55
+
56
+ @property
57
+ def raw(self):
58
+ return self.content
59
+
60
+ @property
61
+ def words(self):
62
+ return word_tokenize(self.content)
63
+
64
+ @property
65
+ def sents(self):
66
+ return [word_tokenize(sent) for sent in sent_tokenize(self.content)]
67
+
68
+ @property
69
+ def paras(self):
70
+ return [
71
+ [word_tokenize(sent) for sent in sent_tokenize(para)]
72
+ for para in blankline_tokenize(self.content)
73
+ ]
74
+
75
+
76
+ class CodeBlock(MarkdownBlock):
77
+ def __init__(self, language, *args):
78
+ self.language = language
79
+ super().__init__(*args)
80
+
81
+ @property
82
+ def sents(self):
83
+ return [word_tokenize(line) for line in self.content.splitlines()]
84
+
85
+ @property
86
+ def lines(self):
87
+ return self.content.splitlines()
88
+
89
+ @property
90
+ def paras(self):
91
+ return [
92
+ [word_tokenize(line) for line in para.splitlines()]
93
+ for para in blankline_tokenize(self.content)
94
+ ]
95
+
96
+
97
+ class MarkdownSection(MarkdownBlock):
98
+ def __init__(self, heading, level, *args):
99
+ self.heading = heading
100
+ self.level = level
101
+ super().__init__(*args)
102
+
103
+
104
+ Image = namedtuple("Image", "label, src, title")
105
+ Link = namedtuple("Link", "label, href, title")
106
+ List = namedtuple("List", "is_ordered, items")
107
+
108
+
109
+ class MarkdownCorpusReader(PlaintextCorpusReader):
110
+ def __init__(self, *args, parser=None, **kwargs):
111
+ from markdown_it import MarkdownIt
112
+ from mdit_plain.renderer import RendererPlain
113
+ from mdit_py_plugins.front_matter import front_matter_plugin
114
+
115
+ self.parser = parser
116
+ if self.parser is None:
117
+ self.parser = MarkdownIt("commonmark", renderer_cls=RendererPlain)
118
+ self.parser.use(front_matter_plugin)
119
+
120
+ kwargs.setdefault(
121
+ "para_block_reader", partial(read_parse_blankline_block, parser=self.parser)
122
+ )
123
+ super().__init__(*args, **kwargs)
124
+
125
+ # This override takes care of removing markup.
126
+ def _read_word_block(self, stream):
127
+ words = list()
128
+ for para in self._para_block_reader(stream):
129
+ words.extend(self._word_tokenizer.tokenize(para))
130
+ return words
131
+
132
+
133
+ class CategorizedMarkdownCorpusReader(CategorizedCorpusReader, MarkdownCorpusReader):
134
+ """
135
+ A reader for markdown corpora whose documents are divided into
136
+ categories based on their file identifiers.
137
+
138
+ Based on nltk.corpus.reader.plaintext.CategorizedPlaintextCorpusReader:
139
+ https://www.nltk.org/_modules/nltk/corpus/reader/api.html#CategorizedCorpusReader
140
+ """
141
+
142
+ def __init__(self, *args, cat_field="tags", **kwargs):
143
+ """
144
+ Initialize the corpus reader. Categorization arguments
145
+ (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
146
+ the ``CategorizedCorpusReader`` constructor. The remaining arguments
147
+ are passed to the ``MarkdownCorpusReader`` constructor.
148
+ """
149
+ cat_args = ["cat_pattern", "cat_map", "cat_file"]
150
+ if not any(arg in kwargs for arg in cat_args):
151
+ # Initialize with a blank map now,
152
+ # and try to build categories from document metadata later.
153
+ kwargs["cat_map"] = dict()
154
+ CategorizedCorpusReader.__init__(self, kwargs)
155
+ MarkdownCorpusReader.__init__(self, *args, **kwargs)
156
+
157
+ # Map file IDs to categories if self._map exists but is still empty:
158
+ if self._map is not None and not self._map:
159
+ for file_id in self._fileids:
160
+ metadata = self.metadata(file_id)
161
+ if metadata:
162
+ self._map[file_id] = metadata[0].get(cat_field, [])
163
+
164
+ ### Begin CategorizedCorpusReader Overrides
165
+ @comma_separated_string_args
166
+ def categories(self, fileids=None):
167
+ return super().categories(fileids)
168
+
169
+ @comma_separated_string_args
170
+ def fileids(self, categories=None):
171
+ if categories is None:
172
+ return self._fileids
173
+ return super().fileids(categories)
174
+
175
+ ### End CategorizedCorpusReader Overrides
176
+
177
+ ### Begin MarkdownCorpusReader Overrides
178
+ @comma_separated_string_args
179
+ def raw(self, fileids=None, categories=None):
180
+ return super().raw(self._resolve(fileids, categories))
181
+
182
+ @comma_separated_string_args
183
+ def words(self, fileids=None, categories=None):
184
+ return super().words(self._resolve(fileids, categories))
185
+
186
+ @comma_separated_string_args
187
+ def sents(self, fileids=None, categories=None):
188
+ return super().sents(self._resolve(fileids, categories))
189
+
190
+ @comma_separated_string_args
191
+ def paras(self, fileids=None, categories=None):
192
+ return super().paras(self._resolve(fileids, categories))
193
+
194
+ ### End MarkdownCorpusReader Overrides
195
+
196
+ def concatenated_view(self, reader, fileids, categories):
197
+ return concat(
198
+ [
199
+ self.CorpusView(path, reader, encoding=enc)
200
+ for (path, enc) in self.abspaths(
201
+ self._resolve(fileids, categories), include_encoding=True
202
+ )
203
+ ]
204
+ )
205
+
206
+ def metadata_reader(self, stream):
207
+ from yaml import safe_load
208
+
209
+ return [
210
+ safe_load(t.content)
211
+ for t in self.parser.parse(stream.read())
212
+ if t.type == "front_matter"
213
+ ]
214
+
215
+ @comma_separated_string_args
216
+ def metadata(self, fileids=None, categories=None):
217
+ return self.concatenated_view(self.metadata_reader, fileids, categories)
218
+
219
+ def blockquote_reader(self, stream):
220
+ tokens = self.parser.parse(stream.read())
221
+ opening_tokens = filter(
222
+ lambda t: t.level == 0 and t.type == "blockquote_open", tokens
223
+ )
224
+ closing_tokens = filter(
225
+ lambda t: t.level == 0 and t.type == "blockquote_close", tokens
226
+ )
227
+ blockquotes = list()
228
+ for o, c in zip(opening_tokens, closing_tokens):
229
+ opening_index = tokens.index(o)
230
+ closing_index = tokens.index(c, opening_index)
231
+ blockquotes.append(tokens[opening_index : closing_index + 1])
232
+ return [
233
+ MarkdownBlock(
234
+ self.parser.renderer.render(block, self.parser.options, env=None)
235
+ )
236
+ for block in blockquotes
237
+ ]
238
+
239
+ @comma_separated_string_args
240
+ def blockquotes(self, fileids=None, categories=None):
241
+ return self.concatenated_view(self.blockquote_reader, fileids, categories)
242
+
243
+ def code_block_reader(self, stream):
244
+ return [
245
+ CodeBlock(
246
+ t.info,
247
+ t.content,
248
+ )
249
+ for t in self.parser.parse(stream.read())
250
+ if t.level == 0 and t.type in ("fence", "code_block")
251
+ ]
252
+
253
+ @comma_separated_string_args
254
+ def code_blocks(self, fileids=None, categories=None):
255
+ return self.concatenated_view(self.code_block_reader, fileids, categories)
256
+
257
+ def image_reader(self, stream):
258
+ return [
259
+ Image(
260
+ child_token.content,
261
+ child_token.attrGet("src"),
262
+ child_token.attrGet("title"),
263
+ )
264
+ for inline_token in filter(
265
+ lambda t: t.type == "inline", self.parser.parse(stream.read())
266
+ )
267
+ for child_token in inline_token.children
268
+ if child_token.type == "image"
269
+ ]
270
+
271
+ @comma_separated_string_args
272
+ def images(self, fileids=None, categories=None):
273
+ return self.concatenated_view(self.image_reader, fileids, categories)
274
+
275
+ def link_reader(self, stream):
276
+ return [
277
+ Link(
278
+ inline_token.children[i + 1].content,
279
+ child_token.attrGet("href"),
280
+ child_token.attrGet("title"),
281
+ )
282
+ for inline_token in filter(
283
+ lambda t: t.type == "inline", self.parser.parse(stream.read())
284
+ )
285
+ for i, child_token in enumerate(inline_token.children)
286
+ if child_token.type == "link_open"
287
+ ]
288
+
289
+ @comma_separated_string_args
290
+ def links(self, fileids=None, categories=None):
291
+ return self.concatenated_view(self.link_reader, fileids, categories)
292
+
293
+ def list_reader(self, stream):
294
+ tokens = self.parser.parse(stream.read())
295
+ opening_types = ("bullet_list_open", "ordered_list_open")
296
+ opening_tokens = filter(
297
+ lambda t: t.level == 0 and t.type in opening_types, tokens
298
+ )
299
+ closing_types = ("bullet_list_close", "ordered_list_close")
300
+ closing_tokens = filter(
301
+ lambda t: t.level == 0 and t.type in closing_types, tokens
302
+ )
303
+ list_blocks = list()
304
+ for o, c in zip(opening_tokens, closing_tokens):
305
+ opening_index = tokens.index(o)
306
+ closing_index = tokens.index(c, opening_index)
307
+ list_blocks.append(tokens[opening_index : closing_index + 1])
308
+ return [
309
+ List(
310
+ tokens[0].type == "ordered_list_open",
311
+ [t.content for t in tokens if t.content],
312
+ )
313
+ for tokens in list_blocks
314
+ ]
315
+
316
+ @comma_separated_string_args
317
+ def lists(self, fileids=None, categories=None):
318
+ return self.concatenated_view(self.list_reader, fileids, categories)
319
+
320
+ def section_reader(self, stream):
321
+ section_blocks, block = list(), list()
322
+ in_heading = False
323
+ for t in self.parser.parse(stream.read()):
324
+ if t.level == 0 and t.type == "heading_open":
325
+ if block:
326
+ section_blocks.append(block)
327
+ block = list()
328
+ in_heading = True
329
+ if in_heading:
330
+ block.append(t)
331
+ return [
332
+ MarkdownSection(
333
+ block[1].content,
334
+ block[0].markup.count("#"),
335
+ self.parser.renderer.render(block, self.parser.options, env=None),
336
+ )
337
+ for block in section_blocks
338
+ ]
339
+
340
+ @comma_separated_string_args
341
+ def sections(self, fileids=None, categories=None):
342
+ return self.concatenated_view(self.section_reader, fileids, categories)
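A hedged usage sketch for the categorized Markdown reader defined above (not part of the commit). The `notes/` root, the `*.md` pattern, and the front-matter `tags` field are illustrative assumptions, and the optional dependencies (markdown-it-py, mdit_py_plugins, mdit_plain, PyYAML) must be installed.

```python
# Hedged sketch: a hypothetical folder of Markdown notes whose YAML front
# matter carries a "tags" field used for categorization.
from nltk.corpus.reader.markdown import CategorizedMarkdownCorpusReader

reader = CategorizedMarkdownCorpusReader("notes/", r".*\.md")  # illustrative paths

print(reader.categories())        # categories gathered from front-matter "tags"
print(reader.words()[:10])        # markup-free word tokens
print(reader.links()[:5])         # Link(label, href, title) named tuples
print(reader.code_blocks()[:2])   # fenced/indented code blocks
print(reader.sections()[:1])      # (heading text, level, content) sections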
lib/python3.10/site-packages/nltk/corpus/reader/mte.py ADDED
@@ -0,0 +1,397 @@
1
+ """
2
+ A reader for corpora whose documents are in MTE format.
3
+ """
4
+ import os
5
+ import re
6
+ from functools import reduce
7
+
8
+ from nltk.corpus.reader import TaggedCorpusReader, concat
9
+ from nltk.corpus.reader.xmldocs import XMLCorpusView
10
+
11
+
12
+ def xpath(root, path, ns):
13
+ return root.findall(path, ns)
14
+
15
+
16
+ class MTECorpusView(XMLCorpusView):
17
+ """
18
+ Class for lazily viewing the MTE Corpus.
19
+ """
20
+
21
+ def __init__(self, fileid, tagspec, elt_handler=None):
22
+ XMLCorpusView.__init__(self, fileid, tagspec, elt_handler)
23
+
24
+ def read_block(self, stream, tagspec=None, elt_handler=None):
25
+ return list(
26
+ filter(
27
+ lambda x: x is not None,
28
+ XMLCorpusView.read_block(self, stream, tagspec, elt_handler),
29
+ )
30
+ )
31
+
32
+
33
+ class MTEFileReader:
34
+ """
35
+ Class for loading the content of the multext-east corpus. It
36
+ parses the xml files and does some tag-filtering depending on the
37
+ given method parameters.
38
+ """
39
+
40
+ ns = {
41
+ "tei": "https://www.tei-c.org/ns/1.0",
42
+ "xml": "https://www.w3.org/XML/1998/namespace",
43
+ }
44
+ tag_ns = "{https://www.tei-c.org/ns/1.0}"
45
+ xml_ns = "{https://www.w3.org/XML/1998/namespace}"
46
+ word_path = "TEI/text/body/div/div/p/s/(w|c)"
47
+ sent_path = "TEI/text/body/div/div/p/s"
48
+ para_path = "TEI/text/body/div/div/p"
49
+
50
+ def __init__(self, file_path):
51
+ self.__file_path = file_path
52
+
53
+ @classmethod
54
+ def _word_elt(cls, elt, context):
55
+ return elt.text
56
+
57
+ @classmethod
58
+ def _sent_elt(cls, elt, context):
59
+ return [cls._word_elt(w, None) for w in xpath(elt, "*", cls.ns)]
60
+
61
+ @classmethod
62
+ def _para_elt(cls, elt, context):
63
+ return [cls._sent_elt(s, None) for s in xpath(elt, "*", cls.ns)]
64
+
65
+ @classmethod
66
+ def _tagged_word_elt(cls, elt, context):
67
+ if "ana" not in elt.attrib:
68
+ return (elt.text, "")
69
+
70
+ if cls.__tags == "" and cls.__tagset == "msd":
71
+ return (elt.text, elt.attrib["ana"])
72
+ elif cls.__tags == "" and cls.__tagset == "universal":
73
+ return (elt.text, MTETagConverter.msd_to_universal(elt.attrib["ana"]))
74
+ else:
75
+ tags = re.compile("^" + re.sub("-", ".", cls.__tags) + ".*$")
76
+ if tags.match(elt.attrib["ana"]):
77
+ if cls.__tagset == "msd":
78
+ return (elt.text, elt.attrib["ana"])
79
+ else:
80
+ return (
81
+ elt.text,
82
+ MTETagConverter.msd_to_universal(elt.attrib["ana"]),
83
+ )
84
+ else:
85
+ return None
86
+
87
+ @classmethod
88
+ def _tagged_sent_elt(cls, elt, context):
89
+ return list(
90
+ filter(
91
+ lambda x: x is not None,
92
+ [cls._tagged_word_elt(w, None) for w in xpath(elt, "*", cls.ns)],
93
+ )
94
+ )
95
+
96
+ @classmethod
97
+ def _tagged_para_elt(cls, elt, context):
98
+ return list(
99
+ filter(
100
+ lambda x: x is not None,
101
+ [cls._tagged_sent_elt(s, None) for s in xpath(elt, "*", cls.ns)],
102
+ )
103
+ )
104
+
105
+ @classmethod
106
+ def _lemma_word_elt(cls, elt, context):
107
+ if "lemma" not in elt.attrib:
108
+ return (elt.text, "")
109
+ else:
110
+ return (elt.text, elt.attrib["lemma"])
111
+
112
+ @classmethod
113
+ def _lemma_sent_elt(cls, elt, context):
114
+ return [cls._lemma_word_elt(w, None) for w in xpath(elt, "*", cls.ns)]
115
+
116
+ @classmethod
117
+ def _lemma_para_elt(cls, elt, context):
118
+ return [cls._lemma_sent_elt(s, None) for s in xpath(elt, "*", cls.ns)]
119
+
120
+ def words(self):
121
+ return MTECorpusView(
122
+ self.__file_path, MTEFileReader.word_path, MTEFileReader._word_elt
123
+ )
124
+
125
+ def sents(self):
126
+ return MTECorpusView(
127
+ self.__file_path, MTEFileReader.sent_path, MTEFileReader._sent_elt
128
+ )
129
+
130
+ def paras(self):
131
+ return MTECorpusView(
132
+ self.__file_path, MTEFileReader.para_path, MTEFileReader._para_elt
133
+ )
134
+
135
+ def lemma_words(self):
136
+ return MTECorpusView(
137
+ self.__file_path, MTEFileReader.word_path, MTEFileReader._lemma_word_elt
138
+ )
139
+
140
+ def tagged_words(self, tagset, tags):
141
+ MTEFileReader.__tagset = tagset
142
+ MTEFileReader.__tags = tags
143
+ return MTECorpusView(
144
+ self.__file_path, MTEFileReader.word_path, MTEFileReader._tagged_word_elt
145
+ )
146
+
147
+ def lemma_sents(self):
148
+ return MTECorpusView(
149
+ self.__file_path, MTEFileReader.sent_path, MTEFileReader._lemma_sent_elt
150
+ )
151
+
152
+ def tagged_sents(self, tagset, tags):
153
+ MTEFileReader.__tagset = tagset
154
+ MTEFileReader.__tags = tags
155
+ return MTECorpusView(
156
+ self.__file_path, MTEFileReader.sent_path, MTEFileReader._tagged_sent_elt
157
+ )
158
+
159
+ def lemma_paras(self):
160
+ return MTECorpusView(
161
+ self.__file_path, MTEFileReader.para_path, MTEFileReader._lemma_para_elt
162
+ )
163
+
164
+ def tagged_paras(self, tagset, tags):
165
+ MTEFileReader.__tagset = tagset
166
+ MTEFileReader.__tags = tags
167
+ return MTECorpusView(
168
+ self.__file_path, MTEFileReader.para_path, MTEFileReader._tagged_para_elt
169
+ )
170
+
171
+
172
+ class MTETagConverter:
173
+ """
174
+ Class for converting MSD tags to Universal tags; other conversion
175
+ options are not currently implemented.
176
+ """
177
+
178
+ mapping_msd_universal = {
179
+ "A": "ADJ",
180
+ "S": "ADP",
181
+ "R": "ADV",
182
+ "C": "CONJ",
183
+ "D": "DET",
184
+ "N": "NOUN",
185
+ "M": "NUM",
186
+ "Q": "PRT",
187
+ "P": "PRON",
188
+ "V": "VERB",
189
+ ".": ".",
190
+ "-": "X",
191
+ }
192
+
193
+ @staticmethod
194
+ def msd_to_universal(tag):
195
+ """
196
+ This function converts an annotation from the MULTEXT-East MSD tagset to the universal tagset,
197
+ as described in Chapter 5 of the NLTK book.
198
+
199
+ Unknown tags are mapped to X; a leading '.' (punctuation) is passed through as the universal '.' tag.
200
+ """
201
+ indicator = tag[0] if not tag[0] == "#" else tag[1]
202
+
203
+ if not indicator in MTETagConverter.mapping_msd_universal:
204
+ indicator = "-"
205
+
206
+ return MTETagConverter.mapping_msd_universal[indicator]
207
+
208
+
209
+ class MTECorpusReader(TaggedCorpusReader):
210
+ """
211
+ Reader for corpora following the TEI-p5 xml scheme, such as MULTEXT-East.
212
+ MULTEXT-East contains part-of-speech-tagged words with a fine-grained MSD tagging
213
+ scheme. These tags can be converted to the Universal tagset.
214
+ """
215
+
216
+ def __init__(self, root=None, fileids=None, encoding="utf8"):
217
+ """
218
+ Construct a new MTECorpusReader for a set of documents
219
+ located at the given root directory. Example usage:
220
+
221
+ >>> root = '/...path to corpus.../'
222
+ >>> reader = MTECorpusReader(root, 'oana-*.xml', 'utf8') # doctest: +SKIP
223
+
224
+ :param root: The root directory for this corpus. (default points to location in multext config file)
225
+ :param fileids: A list or regexp specifying the fileids in this corpus. (default is oana-en.xml)
226
+ :param encoding: The encoding of the given files (default is utf8)
227
+ """
228
+ TaggedCorpusReader.__init__(self, root, fileids, encoding)
229
+ self._readme = "00README.txt"
230
+
231
+ def __fileids(self, fileids):
232
+ if fileids is None:
233
+ fileids = self._fileids
234
+ elif isinstance(fileids, str):
235
+ fileids = [fileids]
236
+ # Filter out fileids that are not part of this corpus.
237
+ fileids = [x for x in fileids if x in self._fileids]
238
+ # Filter out MULTEXT-East source files that are not compatible with the TEI-P5 specification.
239
+ fileids = [x for x in fileids if x not in ["oana-bg.xml", "oana-mk.xml"]]
240
+ if not fileids:
241
+ print("No valid multext-east file specified")
242
+ return fileids
243
+
244
+ def words(self, fileids=None):
245
+ """
246
+ :param fileids: A list specifying the fileids that should be used.
247
+ :return: the given file(s) as a list of words and punctuation symbols.
248
+ :rtype: list(str)
249
+ """
250
+ return concat(
251
+ [
252
+ MTEFileReader(os.path.join(self._root, f)).words()
253
+ for f in self.__fileids(fileids)
254
+ ]
255
+ )
256
+
257
+ def sents(self, fileids=None):
258
+ """
259
+ :param fileids: A list specifying the fileids that should be used.
260
+ :return: the given file(s) as a list of sentences or utterances,
261
+ each encoded as a list of word strings
262
+ :rtype: list(list(str))
263
+ """
264
+ return concat(
265
+ [
266
+ MTEFileReader(os.path.join(self._root, f)).sents()
267
+ for f in self.__fileids(fileids)
268
+ ]
269
+ )
270
+
271
+ def paras(self, fileids=None):
272
+ """
273
+ :param fileids: A list specifying the fileids that should be used.
274
+ :return: the given file(s) as a list of paragraphs, each encoded as a list
275
+ of sentences, which are in turn encoded as lists of word string
276
+ :rtype: list(list(list(str)))
277
+ """
278
+ return concat(
279
+ [
280
+ MTEFileReader(os.path.join(self._root, f)).paras()
281
+ for f in self.__fileids(fileids)
282
+ ]
283
+ )
284
+
285
+ def lemma_words(self, fileids=None):
286
+ """
287
+ :param fileids: A list specifying the fileids that should be used.
288
+ :return: the given file(s) as a list of words, the corresponding lemmas
289
+ and punctuation symbols, encoded as tuples (word, lemma)
290
+ :rtype: list(tuple(str,str))
291
+ """
292
+ return concat(
293
+ [
294
+ MTEFileReader(os.path.join(self._root, f)).lemma_words()
295
+ for f in self.__fileids(fileids)
296
+ ]
297
+ )
298
+
299
+ def tagged_words(self, fileids=None, tagset="msd", tags=""):
300
+ """
301
+ :param fileids: A list specifying the fileids that should be used.
302
+ :param tagset: The tagset that should be used in the returned object,
303
+ either "universal" or "msd", "msd" is the default
304
+ :param tags: An MSD Tag that is used to filter all parts of the used corpus
305
+ that are not more precise or at least equal to the given tag
306
+ :return: the given file(s) as a list of tagged words and punctuation symbols
307
+ encoded as tuples (word, tag)
308
+ :rtype: list(tuple(str, str))
309
+ """
310
+ if tagset == "universal" or tagset == "msd":
311
+ return concat(
312
+ [
313
+ MTEFileReader(os.path.join(self._root, f)).tagged_words(
314
+ tagset, tags
315
+ )
316
+ for f in self.__fileids(fileids)
317
+ ]
318
+ )
319
+ else:
320
+ print("Unknown tagset specified.")
321
+
322
+ def lemma_sents(self, fileids=None):
323
+ """
324
+ :param fileids: A list specifying the fileids that should be used.
325
+ :return: the given file(s) as a list of sentences or utterances, each
326
+ encoded as a list of tuples of the word and the corresponding
327
+ lemma (word, lemma)
328
+ :rtype: list(list(tuple(str, str)))
329
+ """
330
+ return concat(
331
+ [
332
+ MTEFileReader(os.path.join(self._root, f)).lemma_sents()
333
+ for f in self.__fileids(fileids)
334
+ ]
335
+ )
336
+
337
+ def tagged_sents(self, fileids=None, tagset="msd", tags=""):
338
+ """
339
+ :param fileids: A list specifying the fileids that should be used.
340
+ :param tagset: The tagset that should be used in the returned object,
341
+ either "universal" or "msd", "msd" is the default
342
+ :param tags: An MSD Tag that is used to filter all parts of the used corpus
343
+ that are not more precise or at least equal to the given tag
344
+ :return: the given file(s) as a list of sentences or utterances, each
345
+ each encoded as a list of (word,tag) tuples
346
+ :rtype: list(list(tuple(str, str)))
347
+ """
348
+ if tagset == "universal" or tagset == "msd":
349
+ return concat(
350
+ [
351
+ MTEFileReader(os.path.join(self._root, f)).tagged_sents(
352
+ tagset, tags
353
+ )
354
+ for f in self.__fileids(fileids)
355
+ ]
356
+ )
357
+ else:
358
+ print("Unknown tagset specified.")
359
+
360
+ def lemma_paras(self, fileids=None):
361
+ """
362
+ :param fileids: A list specifying the fileids that should be used.
363
+ :return: the given file(s) as a list of paragraphs, each encoded as a
364
+ list of sentences, which are in turn encoded as a list of
365
+ tuples of the word and the corresponding lemma (word, lemma)
366
+ :rtype: list(List(List(tuple(str, str))))
367
+ """
368
+ return concat(
369
+ [
370
+ MTEFileReader(os.path.join(self._root, f)).lemma_paras()
371
+ for f in self.__fileids(fileids)
372
+ ]
373
+ )
374
+
375
+ def tagged_paras(self, fileids=None, tagset="msd", tags=""):
376
+ """
377
+ :param fileids: A list specifying the fileids that should be used.
378
+ :param tagset: The tagset that should be used in the returned object,
379
+ either "universal" or "msd", "msd" is the default
380
+ :param tags: An MSD Tag that is used to filter all parts of the used corpus
381
+ that are not more precise or at least equal to the given tag
382
+ :return: the given file(s) as a list of paragraphs, each encoded as a
383
+ list of sentences, which are in turn encoded as a list
384
+ of (word,tag) tuples
385
+ :rtype: list(list(list(tuple(str, str))))
386
+ """
387
+ if tagset == "universal" or tagset == "msd":
388
+ return concat(
389
+ [
390
+ MTEFileReader(os.path.join(self._root, f)).tagged_paras(
391
+ tagset, tags
392
+ )
393
+ for f in self.__fileids(fileids)
394
+ ]
395
+ )
396
+ else:
397
+ print("Unknown tagset specified.")
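A brief usage sketch (not part of the diff) for the MULTEXT-East reader above, assuming the `mte_teip5` data package has been downloaded and that the pre-wired `nltk.corpus.multext_east` loader points at it; the package id and fileid are assumptions.

```python
# Hedged sketch: read the English MULTEXT-East sample with both tagsets.
from nltk.corpus import multext_east  # assumed loader name

# nltk.download("mte_teip5")  # uncomment on first use; package id is an assumption

print(multext_east.words("oana-en.xml")[:10])
print(multext_east.tagged_words("oana-en.xml", tagset="universal")[:5])
for sent in multext_east.tagged_sents("oana-en.xml", tagset="msd")[:1]:
    print(sent[:8])
```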
lib/python3.10/site-packages/nltk/corpus/reader/nombank.py ADDED
@@ -0,0 +1,466 @@
1
+ # Natural Language Toolkit: NomBank Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Paul Bedaride <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from functools import total_ordering
10
+ from xml.etree import ElementTree
11
+
12
+ from nltk.corpus.reader.api import *
13
+ from nltk.corpus.reader.util import *
14
+ from nltk.internals import raise_unorderable_types
15
+ from nltk.tree import Tree
16
+
17
+
18
+ class NombankCorpusReader(CorpusReader):
19
+ """
20
+ Corpus reader for the nombank corpus, which augments the Penn
21
+ Treebank with information about the predicate argument structure
22
+ of every noun instance. The corpus consists of two parts: the
23
+ predicate-argument annotations themselves, and a set of "frameset
24
+ files" which define the argument labels used by the annotations,
25
+ on a per-noun basis. Each "frameset file" contains one or more
26
+ predicates, such as ``'turn'`` or ``'turn_on'``, each of which is
27
+ divided into coarse-grained word senses called "rolesets". For
28
+ each "roleset", the frameset file provides descriptions of the
29
+ argument roles, along with examples.
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ root,
35
+ nomfile,
36
+ framefiles="",
37
+ nounsfile=None,
38
+ parse_fileid_xform=None,
39
+ parse_corpus=None,
40
+ encoding="utf8",
41
+ ):
42
+ """
43
+ :param root: The root directory for this corpus.
44
+ :param nomfile: The name of the file containing the predicate-
45
+ argument annotations (relative to ``root``).
46
+ :param framefiles: A list or regexp specifying the frameset
47
+ fileids for this corpus.
48
+ :param parse_fileid_xform: A transform that should be applied
49
+ to the fileids in this corpus. This should be a function
50
+ of one argument (a fileid) that returns a string (the new
51
+ fileid).
52
+ :param parse_corpus: The corpus containing the parse trees
53
+ corresponding to this corpus. These parse trees are
54
+ necessary to resolve the tree pointers used by nombank.
55
+ """
56
+
57
+ # If framefiles is specified as a regexp, expand it.
58
+ if isinstance(framefiles, str):
59
+ framefiles = find_corpus_fileids(root, framefiles)
60
+ self._fileids = list(framefiles)
61
+ # Initialize the corpus reader.
62
+ CorpusReader.__init__(self, root, framefiles, encoding)
63
+
64
+ # Record our nom file & nouns file.
65
+ self._nomfile = nomfile
66
+ self._nounsfile = nounsfile
67
+ self._parse_fileid_xform = parse_fileid_xform
68
+ self._parse_corpus = parse_corpus
69
+
70
+ def instances(self, baseform=None):
71
+ """
72
+ :return: a corpus view that acts as a list of
73
+ ``NombankInstance`` objects, one for each noun in the corpus.
74
+ """
75
+ kwargs = {}
76
+ if baseform is not None:
77
+ kwargs["instance_filter"] = lambda inst: inst.baseform == baseform
78
+ return StreamBackedCorpusView(
79
+ self.abspath(self._nomfile),
80
+ lambda stream: self._read_instance_block(stream, **kwargs),
81
+ encoding=self.encoding(self._nomfile),
82
+ )
83
+
84
+ def lines(self):
85
+ """
86
+ :return: a corpus view that acts as a list of strings, one for
87
+ each line in the predicate-argument annotation file.
88
+ """
89
+ return StreamBackedCorpusView(
90
+ self.abspath(self._nomfile),
91
+ read_line_block,
92
+ encoding=self.encoding(self._nomfile),
93
+ )
94
+
95
+ def roleset(self, roleset_id):
96
+ """
97
+ :return: the xml description for the given roleset.
98
+ """
99
+ baseform = roleset_id.split(".")[0]
100
+ baseform = baseform.replace("perc-sign", "%")
101
+ baseform = baseform.replace("oneslashonezero", "1/10").replace(
102
+ "1/10", "1-slash-10"
103
+ )
104
+ framefile = "frames/%s.xml" % baseform
105
+ if framefile not in self.fileids():
106
+ raise ValueError("Frameset file for %s not found" % roleset_id)
107
+
108
+ # n.b.: The encoding for XML fileids is specified by the file
109
+ # itself; so we ignore self._encoding here.
110
+ with self.abspath(framefile).open() as fp:
111
+ etree = ElementTree.parse(fp).getroot()
112
+ for roleset in etree.findall("predicate/roleset"):
113
+ if roleset.attrib["id"] == roleset_id:
114
+ return roleset
115
+ raise ValueError(f"Roleset {roleset_id} not found in {framefile}")
116
+
117
+ def rolesets(self, baseform=None):
118
+ """
119
+ :return: list of xml descriptions for rolesets.
120
+ """
121
+ if baseform is not None:
122
+ framefile = "frames/%s.xml" % baseform
123
+ if framefile not in self.fileids():
124
+ raise ValueError("Frameset file for %s not found" % baseform)
125
+ framefiles = [framefile]
126
+ else:
127
+ framefiles = self.fileids()
128
+
129
+ rsets = []
130
+ for framefile in framefiles:
131
+ # n.b.: The encoding for XML fileids is specified by the file
132
+ # itself; so we ignore self._encoding here.
133
+ with self.abspath(framefile).open() as fp:
134
+ etree = ElementTree.parse(fp).getroot()
135
+ rsets.append(etree.findall("predicate/roleset"))
136
+ return LazyConcatenation(rsets)
137
+
138
+ def nouns(self):
139
+ """
140
+ :return: a corpus view that acts as a list of all noun lemmas
141
+ in this corpus (from the nombank.1.0.words file).
142
+ """
143
+ return StreamBackedCorpusView(
144
+ self.abspath(self._nounsfile),
145
+ read_line_block,
146
+ encoding=self.encoding(self._nounsfile),
147
+ )
148
+
149
+ def _read_instance_block(self, stream, instance_filter=lambda inst: True):
150
+ block = []
151
+
152
+ # Read 100 at a time.
153
+ for i in range(100):
154
+ line = stream.readline().strip()
155
+ if line:
156
+ inst = NombankInstance.parse(
157
+ line, self._parse_fileid_xform, self._parse_corpus
158
+ )
159
+ if instance_filter(inst):
160
+ block.append(inst)
161
+
162
+ return block
163
+
164
+
165
+ ######################################################################
166
+ # { Nombank Instance & related datatypes
167
+ ######################################################################
168
+
169
+
170
+ class NombankInstance:
171
+ def __init__(
172
+ self,
173
+ fileid,
174
+ sentnum,
175
+ wordnum,
176
+ baseform,
177
+ sensenumber,
178
+ predicate,
179
+ predid,
180
+ arguments,
181
+ parse_corpus=None,
182
+ ):
183
+
184
+ self.fileid = fileid
185
+ """The name of the file containing the parse tree for this
186
+ instance's sentence."""
187
+
188
+ self.sentnum = sentnum
189
+ """The sentence number of this sentence within ``fileid``.
190
+ Indexing starts from zero."""
191
+
192
+ self.wordnum = wordnum
193
+ """The word number of this instance's predicate within its
194
+ containing sentence. Word numbers are indexed starting from
195
+ zero, and include traces and other empty parse elements."""
196
+
197
+ self.baseform = baseform
198
+ """The baseform of the predicate."""
199
+
200
+ self.sensenumber = sensenumber
201
+ """The sense number of the predicate."""
202
+
203
+ self.predicate = predicate
204
+ """A ``NombankTreePointer`` indicating the position of this
205
+ instance's predicate within its containing sentence."""
206
+
207
+ self.predid = predid
208
+ """Identifier of the predicate."""
209
+
210
+ self.arguments = tuple(arguments)
211
+ """A list of tuples (argloc, argid), specifying the location
212
+ and identifier for each of the predicate's argument in the
213
+ containing sentence. Argument identifiers are strings such as
214
+ ``'ARG0'`` or ``'ARGM-TMP'``. This list does *not* contain
215
+ the predicate."""
216
+
217
+ self.parse_corpus = parse_corpus
218
+ """A corpus reader for the parse trees corresponding to the
219
+ instances in this nombank corpus."""
220
+
221
+ @property
222
+ def roleset(self):
223
+ """The name of the roleset used by this instance's predicate.
224
+ Use ``nombank.roleset() <NombankCorpusReader.roleset>`` to
225
+ look up information about the roleset."""
226
+ r = self.baseform.replace("%", "perc-sign")
227
+ r = r.replace("1/10", "1-slash-10").replace("1-slash-10", "oneslashonezero")
228
+ return f"{r}.{self.sensenumber}"
229
+
230
+ def __repr__(self):
231
+ return "<NombankInstance: {}, sent {}, word {}>".format(
232
+ self.fileid,
233
+ self.sentnum,
234
+ self.wordnum,
235
+ )
236
+
237
+ def __str__(self):
238
+ s = "{} {} {} {} {}".format(
239
+ self.fileid,
240
+ self.sentnum,
241
+ self.wordnum,
242
+ self.baseform,
243
+ self.sensenumber,
244
+ )
245
+ items = self.arguments + ((self.predicate, "rel"),)
246
+ for (argloc, argid) in sorted(items):
247
+ s += f" {argloc}-{argid}"
248
+ return s
249
+
250
+ def _get_tree(self):
251
+ if self.parse_corpus is None:
252
+ return None
253
+ if self.fileid not in self.parse_corpus.fileids():
254
+ return None
255
+ return self.parse_corpus.parsed_sents(self.fileid)[self.sentnum]
256
+
257
+ tree = property(
258
+ _get_tree,
259
+ doc="""
260
+ The parse tree corresponding to this instance, or None if
261
+ the corresponding tree is not available.""",
262
+ )
263
+
264
+ @staticmethod
265
+ def parse(s, parse_fileid_xform=None, parse_corpus=None):
266
+ pieces = s.split()
267
+ if len(pieces) < 6:
268
+ raise ValueError("Badly formatted nombank line: %r" % s)
269
+
270
+ # Divide the line into its basic pieces.
271
+ (fileid, sentnum, wordnum, baseform, sensenumber) = pieces[:5]
272
+
273
+ args = pieces[5:]
274
+ rel = [args.pop(i) for i, p in enumerate(args) if "-rel" in p]
275
+ if len(rel) != 1:
276
+ raise ValueError("Badly formatted nombank line: %r" % s)
277
+
278
+ # Apply the fileid selector, if any.
279
+ if parse_fileid_xform is not None:
280
+ fileid = parse_fileid_xform(fileid)
281
+
282
+ # Convert sentence & word numbers to ints.
283
+ sentnum = int(sentnum)
284
+ wordnum = int(wordnum)
285
+
286
+ # Parse the predicate location.
287
+
288
+ predloc, predid = rel[0].split("-", 1)
289
+ predicate = NombankTreePointer.parse(predloc)
290
+
291
+ # Parse the arguments.
292
+ arguments = []
293
+ for arg in args:
294
+ argloc, argid = arg.split("-", 1)
295
+ arguments.append((NombankTreePointer.parse(argloc), argid))
296
+
297
+ # Put it all together.
298
+ return NombankInstance(
299
+ fileid,
300
+ sentnum,
301
+ wordnum,
302
+ baseform,
303
+ sensenumber,
304
+ predicate,
305
+ predid,
306
+ arguments,
307
+ parse_corpus,
308
+ )
309
+
310
+
311
+ class NombankPointer:
312
+ """
313
+ A pointer used by nombank to identify one or more constituents in
314
+ a parse tree. ``NombankPointer`` is an abstract base class with
315
+ three concrete subclasses:
316
+
317
+ - ``NombankTreePointer`` is used to point to single constituents.
318
+ - ``NombankSplitTreePointer`` is used to point to 'split'
319
+ constituents, which consist of a sequence of two or more
320
+ ``NombankTreePointer`` pointers.
321
+ - ``NombankChainTreePointer`` is used to point to entire trace
322
+ chains in a tree. It consists of a sequence of pieces, which
323
+ can be ``NombankTreePointer`` or ``NombankSplitTreePointer`` pointers.
324
+ """
325
+
326
+ def __init__(self):
327
+ if self.__class__ == NombankPointer:
328
+ raise NotImplementedError()
329
+
330
+
331
+ class NombankChainTreePointer(NombankPointer):
332
+ def __init__(self, pieces):
333
+ self.pieces = pieces
334
+ """A list of the pieces that make up this chain. Elements may
335
+ be either ``NombankSplitTreePointer`` or
336
+ ``NombankTreePointer`` pointers."""
337
+
338
+ def __str__(self):
339
+ return "*".join("%s" % p for p in self.pieces)
340
+
341
+ def __repr__(self):
342
+ return "<NombankChainTreePointer: %s>" % self
343
+
344
+ def select(self, tree):
345
+ if tree is None:
346
+ raise ValueError("Parse tree not available")
347
+ return Tree("*CHAIN*", [p.select(tree) for p in self.pieces])
348
+
349
+
350
+ class NombankSplitTreePointer(NombankPointer):
351
+ def __init__(self, pieces):
352
+ self.pieces = pieces
353
+ """A list of the pieces that make up this chain. Elements are
354
+ all ``NombankTreePointer`` pointers."""
355
+
356
+ def __str__(self):
357
+ return ",".join("%s" % p for p in self.pieces)
358
+
359
+ def __repr__(self):
360
+ return "<NombankSplitTreePointer: %s>" % self
361
+
362
+ def select(self, tree):
363
+ if tree is None:
364
+ raise ValueError("Parse tree not available")
365
+ return Tree("*SPLIT*", [p.select(tree) for p in self.pieces])
366
+
367
+
368
+ @total_ordering
369
+ class NombankTreePointer(NombankPointer):
370
+ """
371
+ A pointer of the form ``wordnum:height``. Chains of pointers are joined with ``*``
372
+ (``wordnum:height*wordnum:height*...``) and split constituents are separated by ``,``.
373
+
374
+ """
375
+
376
+ def __init__(self, wordnum, height):
377
+ self.wordnum = wordnum
378
+ self.height = height
379
+
380
+ @staticmethod
381
+ def parse(s):
382
+ # Deal with chains (xx*yy*zz)
383
+ pieces = s.split("*")
384
+ if len(pieces) > 1:
385
+ return NombankChainTreePointer(
386
+ [NombankTreePointer.parse(elt) for elt in pieces]
387
+ )
388
+
389
+ # Deal with split args (xx,yy,zz)
390
+ pieces = s.split(",")
391
+ if len(pieces) > 1:
392
+ return NombankSplitTreePointer(
393
+ [NombankTreePointer.parse(elt) for elt in pieces]
394
+ )
395
+
396
+ # Deal with normal pointers.
397
+ pieces = s.split(":")
398
+ if len(pieces) != 2:
399
+ raise ValueError("bad nombank pointer %r" % s)
400
+ return NombankTreePointer(int(pieces[0]), int(pieces[1]))
401
+
402
+ def __str__(self):
403
+ return f"{self.wordnum}:{self.height}"
404
+
405
+ def __repr__(self):
406
+ return "NombankTreePointer(%d, %d)" % (self.wordnum, self.height)
407
+
408
+ def __eq__(self, other):
409
+ while isinstance(other, (NombankChainTreePointer, NombankSplitTreePointer)):
410
+ other = other.pieces[0]
411
+
412
+ if not isinstance(other, NombankTreePointer):
413
+ return self is other
414
+
415
+ return self.wordnum == other.wordnum and self.height == other.height
416
+
417
+ def __ne__(self, other):
418
+ return not self == other
419
+
420
+ def __lt__(self, other):
421
+ while isinstance(other, (NombankChainTreePointer, NombankSplitTreePointer)):
422
+ other = other.pieces[0]
423
+
424
+ if not isinstance(other, NombankTreePointer):
425
+ return id(self) < id(other)
426
+
427
+ return (self.wordnum, -self.height) < (other.wordnum, -other.height)
428
+
429
+ def select(self, tree):
430
+ if tree is None:
431
+ raise ValueError("Parse tree not available")
432
+ return tree[self.treepos(tree)]
433
+
434
+ def treepos(self, tree):
435
+ """
436
+ Convert this pointer to a standard 'tree position' pointer,
437
+ given that it points to the given tree.
438
+ """
439
+ if tree is None:
440
+ raise ValueError("Parse tree not available")
441
+ stack = [tree]
442
+ treepos = []
443
+
444
+ wordnum = 0
445
+ while True:
446
+ # tree node:
447
+ if isinstance(stack[-1], Tree):
448
+ # Select the next child.
449
+ if len(treepos) < len(stack):
450
+ treepos.append(0)
451
+ else:
452
+ treepos[-1] += 1
453
+ # Update the stack.
454
+ if treepos[-1] < len(stack[-1]):
455
+ stack.append(stack[-1][treepos[-1]])
456
+ else:
457
+ # End of node's child list: pop up a level.
458
+ stack.pop()
459
+ treepos.pop()
460
+ # word node:
461
+ else:
462
+ if wordnum == self.wordnum:
463
+ return tuple(treepos[: len(treepos) - self.height - 1])
464
+ else:
465
+ wordnum += 1
466
+ stack.pop()
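A short, hedged sketch of how the NomBank reader above is typically driven through the pre-wired `nltk.corpus.nombank` loader; it assumes the `nombank.1.0` data package is installed.

```python
# Hedged sketch: inspect the first NomBank instance and parse pointers by hand.
from nltk.corpus import nombank
from nltk.corpus.reader.nombank import NombankTreePointer

inst = nombank.instances()[0]
print(inst.fileid, inst.sentnum, inst.wordnum, inst.baseform)
print(inst.roleset)      # "<baseform>.<sensenumber>"
print(inst.predicate)    # a tree pointer such as 7:0
print(inst.arguments)    # ((pointer, 'ARG0'), ...)

# Pointers can also be parsed directly from their string form:
chain = NombankTreePointer.parse("2:1*5:0")   # a trace chain
split = NombankTreePointer.parse("2:1,5:0")   # a split constituent
print(chain, split)
```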
lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py ADDED
@@ -0,0 +1,125 @@
1
+ # Natural Language Toolkit: Opinion Lexicon Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Pierpaolo Pantone <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for the Opinion Lexicon.
10
+
11
+ Opinion Lexicon information
12
+ ===========================
13
+
14
+ Authors: Minqing Hu and Bing Liu, 2004.
15
+ Department of Computer Science
16
+ University of Illinois at Chicago
17
+
18
+ Contact: Bing Liu, [email protected]
19
+ https://www.cs.uic.edu/~liub
20
+
21
+ Distributed with permission.
22
+
23
+ Related papers:
24
+
25
+ - Minqing Hu and Bing Liu. "Mining and summarizing customer reviews".
26
+ Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery
27
+ & Data Mining (KDD-04), Aug 22-25, 2004, Seattle, Washington, USA.
28
+
29
+ - Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and
30
+ Comparing Opinions on the Web". Proceedings of the 14th International World
31
+ Wide Web conference (WWW-2005), May 10-14, 2005, Chiba, Japan.
32
+ """
33
+
34
+ from nltk.corpus.reader import WordListCorpusReader
35
+ from nltk.corpus.reader.api import *
36
+
37
+
38
+ class IgnoreReadmeCorpusView(StreamBackedCorpusView):
39
+ """
40
+ This CorpusView is used to skip the initial readme block of the corpus.
41
+ """
42
+
43
+ def __init__(self, *args, **kwargs):
44
+ StreamBackedCorpusView.__init__(self, *args, **kwargs)
45
+ # open self._stream
46
+ self._open()
47
+ # skip the readme block
48
+ read_blankline_block(self._stream)
49
+ # Set the initial position to the current stream position
50
+ self._filepos = [self._stream.tell()]
51
+
52
+
53
+ class OpinionLexiconCorpusReader(WordListCorpusReader):
54
+ """
55
+ Reader for Liu and Hu opinion lexicon. Blank lines and readme are ignored.
56
+
57
+ >>> from nltk.corpus import opinion_lexicon
58
+ >>> opinion_lexicon.words()
59
+ ['2-faced', '2-faces', 'abnormal', 'abolish', ...]
60
+
61
+ The OpinionLexiconCorpusReader provides shortcuts to retrieve positive/negative
62
+ words:
63
+
64
+ >>> opinion_lexicon.negative()
65
+ ['2-faced', '2-faces', 'abnormal', 'abolish', ...]
66
+
67
+ Note that words from `words()` method are sorted by file id, not alphabetically:
68
+
69
+ >>> opinion_lexicon.words()[0:10] # doctest: +NORMALIZE_WHITESPACE
70
+ ['2-faced', '2-faces', 'abnormal', 'abolish', 'abominable', 'abominably',
71
+ 'abominate', 'abomination', 'abort', 'aborted']
72
+ >>> sorted(opinion_lexicon.words())[0:10] # doctest: +NORMALIZE_WHITESPACE
73
+ ['2-faced', '2-faces', 'a+', 'abnormal', 'abolish', 'abominable', 'abominably',
74
+ 'abominate', 'abomination', 'abort']
75
+ """
76
+
77
+ CorpusView = IgnoreReadmeCorpusView
78
+
79
+ def words(self, fileids=None):
80
+ """
81
+ Return all words in the opinion lexicon. Note that these words are not
82
+ sorted in alphabetical order.
83
+
84
+ :param fileids: a list or regexp specifying the ids of the files whose
85
+ words have to be returned.
86
+ :return: the given file(s) as a list of words and punctuation symbols.
87
+ :rtype: list(str)
88
+ """
89
+ if fileids is None:
90
+ fileids = self._fileids
91
+ elif isinstance(fileids, str):
92
+ fileids = [fileids]
93
+ return concat(
94
+ [
95
+ self.CorpusView(path, self._read_word_block, encoding=enc)
96
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
97
+ ]
98
+ )
99
+
100
+ def positive(self):
101
+ """
102
+ Return all positive words in alphabetical order.
103
+
104
+ :return: a list of positive words.
105
+ :rtype: list(str)
106
+ """
107
+ return self.words("positive-words.txt")
108
+
109
+ def negative(self):
110
+ """
111
+ Return all negative words in alphabetical order.
112
+
113
+ :return: a list of negative words.
114
+ :rtype: list(str)
115
+ """
116
+ return self.words("negative-words.txt")
117
+
118
+ def _read_word_block(self, stream):
119
+ words = []
120
+ for i in range(20): # Read 20 lines at a time.
121
+ line = stream.readline()
122
+ if not line:
123
+ continue
124
+ words.append(line.strip())
125
+ return words
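The doctests above already show the reader's API; the following hedged sketch adds a tiny lexicon-lookup sentiment count, assuming the `opinion_lexicon` and `punkt` data packages are available. The example sentence is illustrative.

```python
# Hedged sketch: naive positive-minus-negative token count using the lexicon.
from nltk.corpus import opinion_lexicon
from nltk.tokenize import word_tokenize

pos = set(opinion_lexicon.positive())
neg = set(opinion_lexicon.negative())

tokens = [t.lower() for t in word_tokenize("The plot was dull but the acting was wonderful.")]
score = sum((t in pos) - (t in neg) for t in tokens)
print(score)   # > 0 leans positive, < 0 leans negative
```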
lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py ADDED
@@ -0,0 +1,95 @@
1
+ # Natural Language Toolkit: Word List Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+
10
+ import re
11
+ from collections import defaultdict, namedtuple
12
+
13
+ from nltk.corpus.reader.api import *
14
+ from nltk.corpus.reader.util import *
15
+ from nltk.corpus.reader.wordlist import WordListCorpusReader
16
+ from nltk.tokenize import line_tokenize
17
+
18
+ PanlexLanguage = namedtuple(
19
+ "PanlexLanguage",
20
+ [
21
+ "panlex_uid", # (1) PanLex UID
22
+ "iso639", # (2) ISO 639 language code
23
+ "iso639_type", # (3) ISO 639 language type, see README
24
+ "script", # (4) normal scripts of expressions
25
+ "name", # (5) PanLex default name
26
+ "langvar_uid", # (6) UID of the language variety in which the default name is an expression
27
+ ],
28
+ )
29
+
30
+
31
+ class PanlexSwadeshCorpusReader(WordListCorpusReader):
32
+ """
33
+ This is a class to read the PanLex Swadesh list from
34
+
35
+ David Kamholz, Jonathan Pool, and Susan M. Colowick (2014).
36
+ PanLex: Building a Resource for Panlingual Lexical Translation.
37
+ In LREC. http://www.lrec-conf.org/proceedings/lrec2014/pdf/1029_Paper.pdf
38
+
39
+ License: CC0 1.0 Universal
40
+ https://creativecommons.org/publicdomain/zero/1.0/legalcode
41
+ """
42
+
43
+ def __init__(self, *args, **kwargs):
44
+ super().__init__(*args, **kwargs)
45
+ # Find the swadesh size using the fileids' path.
46
+ self.swadesh_size = re.match(r"swadesh([0-9].*)\/", self.fileids()[0]).group(1)
47
+ self._languages = {lang.panlex_uid: lang for lang in self.get_languages()}
48
+ self._macro_langauges = self.get_macrolanguages()
49
+
50
+ def license(self):
51
+ return "CC0 1.0 Universal"
52
+
53
+ def language_codes(self):
54
+ return self._languages.keys()
55
+
56
+ def get_languages(self):
57
+ for line in self.raw(f"langs{self.swadesh_size}.txt").split("\n"):
58
+ if not line.strip(): # Skip empty lines.
59
+ continue
60
+ yield PanlexLanguage(*line.strip().split("\t"))
61
+
62
+ def get_macrolanguages(self):
63
+ macro_langauges = defaultdict(list)
64
+ for lang in self._languages.values():
65
+ macro_langauges[lang.iso639].append(lang.panlex_uid)
66
+ return macro_langauges
67
+
68
+ def words_by_lang(self, lang_code):
69
+ """
70
+ :return: a list of list(str)
71
+ """
72
+ fileid = f"swadesh{self.swadesh_size}/{lang_code}.txt"
73
+ return [concept.split("\t") for concept in self.words(fileid)]
74
+
75
+ def words_by_iso639(self, iso63_code):
76
+ """
77
+ :return: a list of list(str)
78
+ """
79
+ fileids = [
80
+ f"swadesh{self.swadesh_size}/{lang_code}.txt"
81
+ for lang_code in self._macro_langauges[iso63_code]
82
+ ]
83
+ return [
84
+ concept.split("\t") for fileid in fileids for concept in self.words(fileid)
85
+ ]
86
+
87
+ def entries(self, fileids=None):
88
+ """
89
+ :return: a tuple of words for the specified fileids.
90
+ """
91
+ if not fileids:
92
+ fileids = self.fileids()
93
+
94
+ wordlists = [self.words(f) for f in fileids]
95
+ return list(zip(*wordlists))
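A hedged sketch of the reader above via the `nltk.corpus.panlex_swadesh` loader; the loader name, the `panlex_swadesh` data package, and the `eng-000` PanLex UID are assumptions.

```python
# Hedged sketch: list a few language varieties and the English Swadesh entries.
from nltk.corpus import panlex_swadesh  # assumed loader name

print(panlex_swadesh.license())
print(list(panlex_swadesh.language_codes())[:5])
print(panlex_swadesh.words_by_lang("eng-000")[:5])   # "eng-000" assumed to be English
```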
lib/python3.10/site-packages/nltk/corpus/reader/plaintext.py ADDED
@@ -0,0 +1,227 @@
1
+ # Natural Language Toolkit: Plaintext Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # Nitin Madnani <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ A reader for corpora that consist of plaintext documents.
12
+ """
13
+
14
+ import nltk.data
15
+ from nltk.corpus.reader.api import *
16
+ from nltk.corpus.reader.util import *
17
+ from nltk.tokenize import *
18
+
19
+
20
+ class PlaintextCorpusReader(CorpusReader):
21
+ """
22
+ Reader for corpora that consist of plaintext documents. Paragraphs
23
+ are assumed to be split using blank lines. Sentences and words can
24
+ be tokenized using the default tokenizers, or by custom tokenizers
25
+ specified as parameters to the constructor.
26
+
27
+ This corpus reader can be customized (e.g., to skip preface
28
+ sections of specific document formats) by creating a subclass and
29
+ overriding the ``CorpusView`` class variable.
30
+ """
31
+
32
+ CorpusView = StreamBackedCorpusView
33
+ """The corpus view class used by this reader. Subclasses of
34
+ ``PlaintextCorpusReader`` may specify alternative corpus view
35
+ classes (e.g., to skip the preface sections of documents.)"""
36
+
37
+ def __init__(
38
+ self,
39
+ root,
40
+ fileids,
41
+ word_tokenizer=WordPunctTokenizer(),
42
+ sent_tokenizer=nltk.data.LazyLoader("tokenizers/punkt/english.pickle"),
43
+ para_block_reader=read_blankline_block,
44
+ encoding="utf8",
45
+ ):
46
+ r"""
47
+ Construct a new plaintext corpus reader for a set of documents
48
+ located at the given root directory. Example usage:
49
+
50
+ >>> root = '/usr/local/share/nltk_data/corpora/webtext/'
51
+ >>> reader = PlaintextCorpusReader(root, '.*\.txt') # doctest: +SKIP
52
+
53
+ :param root: The root directory for this corpus.
54
+ :param fileids: A list or regexp specifying the fileids in this corpus.
55
+ :param word_tokenizer: Tokenizer for breaking sentences or
56
+ paragraphs into words.
57
+ :param sent_tokenizer: Tokenizer for breaking paragraphs
58
+ into sentences.
59
+ :param para_block_reader: The block reader used to divide the
60
+ corpus into paragraph blocks.
61
+ """
62
+ CorpusReader.__init__(self, root, fileids, encoding)
63
+ self._word_tokenizer = word_tokenizer
64
+ self._sent_tokenizer = sent_tokenizer
65
+ self._para_block_reader = para_block_reader
66
+
67
+ def words(self, fileids=None):
68
+ """
69
+ :return: the given file(s) as a list of words
70
+ and punctuation symbols.
71
+ :rtype: list(str)
72
+ """
73
+ return concat(
74
+ [
75
+ self.CorpusView(path, self._read_word_block, encoding=enc)
76
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
77
+ ]
78
+ )
79
+
80
+ def sents(self, fileids=None):
81
+ """
82
+ :return: the given file(s) as a list of
83
+ sentences or utterances, each encoded as a list of word
84
+ strings.
85
+ :rtype: list(list(str))
86
+ """
87
+ if self._sent_tokenizer is None:
88
+ raise ValueError("No sentence tokenizer for this corpus")
89
+
90
+ return concat(
91
+ [
92
+ self.CorpusView(path, self._read_sent_block, encoding=enc)
93
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
94
+ ]
95
+ )
96
+
97
+ def paras(self, fileids=None):
98
+ """
99
+ :return: the given file(s) as a list of
100
+ paragraphs, each encoded as a list of sentences, which are
101
+ in turn encoded as lists of word strings.
102
+ :rtype: list(list(list(str)))
103
+ """
104
+ if self._sent_tokenizer is None:
105
+ raise ValueError("No sentence tokenizer for this corpus")
106
+
107
+ return concat(
108
+ [
109
+ self.CorpusView(path, self._read_para_block, encoding=enc)
110
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
111
+ ]
112
+ )
113
+
114
+ def _read_word_block(self, stream):
115
+ words = []
116
+ for i in range(20): # Read 20 lines at a time.
117
+ words.extend(self._word_tokenizer.tokenize(stream.readline()))
118
+ return words
119
+
120
+ def _read_sent_block(self, stream):
121
+ sents = []
122
+ for para in self._para_block_reader(stream):
123
+ sents.extend(
124
+ [
125
+ self._word_tokenizer.tokenize(sent)
126
+ for sent in self._sent_tokenizer.tokenize(para)
127
+ ]
128
+ )
129
+ return sents
130
+
131
+ def _read_para_block(self, stream):
132
+ paras = []
133
+ for para in self._para_block_reader(stream):
134
+ paras.append(
135
+ [
136
+ self._word_tokenizer.tokenize(sent)
137
+ for sent in self._sent_tokenizer.tokenize(para)
138
+ ]
139
+ )
140
+ return paras
141
+
142
+
143
+ class CategorizedPlaintextCorpusReader(CategorizedCorpusReader, PlaintextCorpusReader):
144
+ """
145
+ A reader for plaintext corpora whose documents are divided into
146
+ categories based on their file identifiers.
147
+ """
148
+
149
+ def __init__(self, *args, **kwargs):
150
+ """
151
+ Initialize the corpus reader. Categorization arguments
152
+ (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
153
+ the ``CategorizedCorpusReader`` constructor. The remaining arguments
154
+ are passed to the ``PlaintextCorpusReader`` constructor.
155
+ """
156
+ CategorizedCorpusReader.__init__(self, kwargs)
157
+ PlaintextCorpusReader.__init__(self, *args, **kwargs)
158
+
159
+
160
+ # FIXME: Is there a better way? How to not hardcode this?
161
+ # Possibly, add a language kwargs to CategorizedPlaintextCorpusReader to
162
+ # override the `sent_tokenizer`.
163
+ class PortugueseCategorizedPlaintextCorpusReader(CategorizedPlaintextCorpusReader):
164
+ def __init__(self, *args, **kwargs):
165
+ CategorizedCorpusReader.__init__(self, kwargs)
166
+ kwargs["sent_tokenizer"] = nltk.data.LazyLoader(
167
+ "tokenizers/punkt/portuguese.pickle"
168
+ )
169
+ PlaintextCorpusReader.__init__(self, *args, **kwargs)
170
+
171
+
172
+ class EuroparlCorpusReader(PlaintextCorpusReader):
173
+
174
+ """
175
+ Reader for Europarl corpora that consist of plaintext documents.
176
+ Documents are divided into chapters instead of paragraphs as
177
+ for regular plaintext documents. Chapters are separated using blank
178
+ lines. Everything is inherited from ``PlaintextCorpusReader`` except
179
+ that:
180
+
181
+ - Since the corpus is pre-processed and pre-tokenized, the
182
+ word tokenizer should just split the line at whitespaces.
183
+ - For the same reason, the sentence tokenizer should just
184
+ split the paragraph at line breaks.
185
+ - There is a new 'chapters()' method that returns chapters instead
186
+ of paragraphs.
187
+ - The 'paras()' method inherited from PlaintextCorpusReader is
188
+ made non-functional to remove any confusion between chapters
189
+ and paragraphs for Europarl.
190
+ """
191
+
192
+ def _read_word_block(self, stream):
193
+ words = []
194
+ for i in range(20): # Read 20 lines at a time.
195
+ words.extend(stream.readline().split())
196
+ return words
197
+
198
+ def _read_sent_block(self, stream):
199
+ sents = []
200
+ for para in self._para_block_reader(stream):
201
+ sents.extend([sent.split() for sent in para.splitlines()])
202
+ return sents
203
+
204
+ def _read_para_block(self, stream):
205
+ paras = []
206
+ for para in self._para_block_reader(stream):
207
+ paras.append([sent.split() for sent in para.splitlines()])
208
+ return paras
209
+
210
+ def chapters(self, fileids=None):
211
+ """
212
+ :return: the given file(s) as a list of
213
+ chapters, each encoded as a list of sentences, which are
214
+ in turn encoded as lists of word strings.
215
+ :rtype: list(list(list(str)))
216
+ """
217
+ return concat(
218
+ [
219
+ self.CorpusView(fileid, self._read_para_block, encoding=enc)
220
+ for (fileid, enc) in self.abspaths(fileids, True)
221
+ ]
222
+ )
223
+
224
+ def paras(self, fileids=None):
225
+ raise NotImplementedError(
226
+ "The Europarl corpus reader does not support paragraphs. Please use chapters() instead."
227
+ )
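A minimal, hedged sketch of the plaintext reader above; the corpus root `/tmp/my_corpus` and its `*.txt` files are illustrative, and the whitespace tokenizer swap is just one possible customization.

```python
# Hedged sketch: default tokenization vs. a whitespace-only word tokenizer.
from nltk.corpus.reader.plaintext import PlaintextCorpusReader
from nltk.tokenize import RegexpTokenizer

root = "/tmp/my_corpus"   # assumed to contain plain-text *.txt files
reader = PlaintextCorpusReader(root, r".*\.txt")
print(reader.fileids())
print(reader.words()[:10])
print(reader.paras()[0][0])   # first sentence of the first paragraph (needs the 'punkt' model)

ws_reader = PlaintextCorpusReader(root, r".*\.txt",
                                  word_tokenizer=RegexpTokenizer(r"\S+"))
print(ws_reader.words()[:10])
```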
lib/python3.10/site-packages/nltk/corpus/reader/ppattach.py ADDED
@@ -0,0 +1,95 @@
1
+ # Natural Language Toolkit: PP Attachment Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Read lines from the Prepositional Phrase Attachment Corpus.
11
+
12
+ The PP Attachment Corpus contains several files having the format:
13
+
14
+ sentence_id verb noun1 preposition noun2 attachment
15
+
16
+ For example:
17
+
18
+ 42960 gives authority to administration V
19
+ 46742 gives inventors of microchip N
20
+
21
+ The PP attachment is to the verb phrase (V) or noun phrase (N), i.e.:
22
+
23
+ (VP gives (NP authority) (PP to administration))
24
+ (VP gives (NP inventors (PP of microchip)))
25
+
26
+ The corpus contains the following files:
27
+
28
+ training: training set
29
+ devset: development test set, used for algorithm development.
30
+ test: test set, used to report results
31
+ bitstrings: word classes derived from Mutual Information Clustering for the Wall Street Journal.
32
+
33
+ Ratnaparkhi, Adwait (1994). A Maximum Entropy Model for Prepositional
34
+ Phrase Attachment. Proceedings of the ARPA Human Language Technology
35
+ Conference. [http://www.cis.upenn.edu/~adwait/papers/hlt94.ps]
36
+
37
+ The PP Attachment Corpus is distributed with NLTK with the permission
38
+ of the author.
39
+ """
40
+
41
+ from nltk.corpus.reader.api import *
42
+ from nltk.corpus.reader.util import *
43
+
44
+
45
+ class PPAttachment:
46
+ def __init__(self, sent, verb, noun1, prep, noun2, attachment):
47
+ self.sent = sent
48
+ self.verb = verb
49
+ self.noun1 = noun1
50
+ self.prep = prep
51
+ self.noun2 = noun2
52
+ self.attachment = attachment
53
+
54
+ def __repr__(self):
55
+ return (
56
+ "PPAttachment(sent=%r, verb=%r, noun1=%r, prep=%r, "
57
+ "noun2=%r, attachment=%r)"
58
+ % (self.sent, self.verb, self.noun1, self.prep, self.noun2, self.attachment)
59
+ )
60
+
61
+
62
+ class PPAttachmentCorpusReader(CorpusReader):
63
+ """
64
+ sentence_id verb noun1 preposition noun2 attachment
65
+ """
66
+
67
+ def attachments(self, fileids):
68
+ return concat(
69
+ [
70
+ StreamBackedCorpusView(fileid, self._read_obj_block, encoding=enc)
71
+ for (fileid, enc) in self.abspaths(fileids, True)
72
+ ]
73
+ )
74
+
75
+ def tuples(self, fileids):
76
+ return concat(
77
+ [
78
+ StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc)
79
+ for (fileid, enc) in self.abspaths(fileids, True)
80
+ ]
81
+ )
82
+
83
+ def _read_tuple_block(self, stream):
84
+ line = stream.readline()
85
+ if line:
86
+ return [tuple(line.split())]
87
+ else:
88
+ return []
89
+
90
+ def _read_obj_block(self, stream):
91
+ line = stream.readline()
92
+ if line:
93
+ return [PPAttachment(*line.split())]
94
+ else:
95
+ return []
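A hedged sketch of reading PP attachment instances through the pre-wired `nltk.corpus.ppattach` loader, assuming the `ppattach` data package has been downloaded.

```python
# Hedged sketch: read the first training instance as an object and as a tuple.
from nltk.corpus import ppattach

inst = ppattach.attachments("training")[0]
print(inst.verb, inst.noun1, inst.prep, inst.noun2, inst.attachment)

print(ppattach.tuples("training")[0])   # same record as a plain tuple
```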
lib/python3.10/site-packages/nltk/corpus/reader/rte.py ADDED
@@ -0,0 +1,146 @@
1
+ # Natural Language Toolkit: RTE Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora.
10
+
11
+ The files were taken from the RTE1, RTE2 and RTE3 datasets and the files
12
+ were regularized.
13
+
14
+ Filenames are of the form rte*_dev.xml and rte*_test.xml. The latter are the
15
+ gold standard annotated files.
16
+
17
+ Each entailment corpus is a list of 'text'/'hypothesis' pairs. The following
18
+ example is taken from RTE3::
19
+
20
+ <pair id="1" entailment="YES" task="IE" length="short" >
21
+
22
+ <t>The sale was made to pay Yukos' US$ 27.5 billion tax bill,
23
+ Yuganskneftegaz was originally sold for US$ 9.4 billion to a little known
24
+ company Baikalfinansgroup which was later bought by the Russian
25
+ state-owned oil company Rosneft .</t>
26
+
27
+ <h>Baikalfinansgroup was sold to Rosneft.</h>
28
+ </pair>
29
+
30
+ In order to provide globally unique IDs for each pair, a new attribute
31
+ ``challenge`` has been added to the root element ``entailment-corpus`` of each
32
+ file, taking values 1, 2 or 3. The GID is formatted 'm-n', where 'm' is the
33
+ challenge number and 'n' is the pair ID.
34
+ """
35
+ from nltk.corpus.reader.api import *
36
+ from nltk.corpus.reader.util import *
37
+ from nltk.corpus.reader.xmldocs import *
38
+
39
+
40
+ def norm(value_string):
41
+ """
42
+ Normalize the string value in an RTE pair's ``value`` or ``entailment``
43
+ attribute as an integer (1, 0).
44
+
45
+ :param value_string: the label used to classify a text/hypothesis pair
46
+ :type value_string: str
47
+ :rtype: int
48
+ """
49
+
50
+ valdict = {"TRUE": 1, "FALSE": 0, "YES": 1, "NO": 0}
51
+ return valdict[value_string.upper()]
52
+
53
+
54
+ class RTEPair:
55
+ """
56
+ Container for RTE text-hypothesis pairs.
57
+
58
+ The entailment relation is signalled by the ``value`` attribute in RTE1, and by
59
+ ``entailment`` in RTE2 and RTE3. These both get mapped on to the ``entailment``
60
+ attribute of this class.
61
+ """
62
+
63
+ def __init__(
64
+ self,
65
+ pair,
66
+ challenge=None,
67
+ id=None,
68
+ text=None,
69
+ hyp=None,
70
+ value=None,
71
+ task=None,
72
+ length=None,
73
+ ):
74
+ """
75
+ :param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3)
76
+ :param id: identifier for the pair
77
+ :param text: the text component of the pair
78
+ :param hyp: the hypothesis component of the pair
79
+ :param value: classification label for the pair
80
+ :param task: attribute for the particular NLP task that the data was drawn from
81
+ :param length: attribute for the length of the text of the pair
82
+ """
83
+ self.challenge = challenge
84
+ self.id = pair.attrib["id"]
85
+ self.gid = f"{self.challenge}-{self.id}"
86
+ self.text = pair[0].text
87
+ self.hyp = pair[1].text
88
+
89
+ if "value" in pair.attrib:
90
+ self.value = norm(pair.attrib["value"])
91
+ elif "entailment" in pair.attrib:
92
+ self.value = norm(pair.attrib["entailment"])
93
+ else:
94
+ self.value = value
95
+ if "task" in pair.attrib:
96
+ self.task = pair.attrib["task"]
97
+ else:
98
+ self.task = task
99
+ if "length" in pair.attrib:
100
+ self.length = pair.attrib["length"]
101
+ else:
102
+ self.length = length
103
+
104
+ def __repr__(self):
105
+ if self.challenge:
106
+ return f"<RTEPair: gid={self.challenge}-{self.id}>"
107
+ else:
108
+ return "<RTEPair: id=%s>" % self.id
109
+
110
+
111
+ class RTECorpusReader(XMLCorpusReader):
112
+ """
113
+ Corpus reader for corpora in RTE challenges.
114
+
115
+ This is just a wrapper around the XMLCorpusReader. See module docstring above for the expected
116
+ structure of input documents.
117
+ """
118
+
119
+ def _read_etree(self, doc):
120
+ """
121
+ Map the XML input into an RTEPair.
122
+
123
+ This uses the ``iter()`` method from the ElementTree package to
124
+ find all the ``<pair>`` elements.
125
+
126
+ :param doc: a parsed XML document
127
+ :rtype: list(RTEPair)
128
+ """
129
+ try:
130
+ challenge = doc.attrib["challenge"]
131
+ except KeyError:
132
+ challenge = None
133
+ pairiter = doc.iter("pair")
134
+ return [RTEPair(pair, challenge=challenge) for pair in pairiter]
135
+
136
+ def pairs(self, fileids):
137
+ """
138
+ Build a list of RTEPairs from a RTE corpus.
139
+
140
+ :param fileids: a list of RTE corpus fileids
141
+ :type: list
142
+ :rtype: list(RTEPair)
143
+ """
144
+ if isinstance(fileids, str):
145
+ fileids = [fileids]
146
+ return concat([self._read_etree(self.xml(fileid)) for fileid in fileids])
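A minimal usage sketch of RTECorpusReader, assuming the "rte" corpus has been installed (e.g. via nltk.download("rte")); the fileid below comes from that distribution:

    # Assumes nltk.download("rte") has been run.
    from nltk.corpus import rte

    pair = rte.pairs(["rte1_dev.xml"])[0]
    print(pair.gid, pair.value)   # globally unique id, normalized entailment label
    print(pair.text)
    print(pair.hyp)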
lib/python3.10/site-packages/nltk/corpus/reader/senseval.py ADDED
@@ -0,0 +1,196 @@
1
+ # Natural Language Toolkit: Senseval 2 Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # Steven Bird <[email protected]> (modifications)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Read from the Senseval 2 Corpus.
11
+
12
+ SENSEVAL [http://www.senseval.org/]
13
+ Evaluation exercises for Word Sense Disambiguation.
14
+ Organized by ACL-SIGLEX [https://www.siglex.org/]
15
+
16
+ Prepared by Ted Pedersen <[email protected]>, University of Minnesota,
17
+ https://www.d.umn.edu/~tpederse/data.html
18
+ Distributed with permission.
19
+
20
+ The NLTK version of the Senseval 2 files uses well-formed XML.
21
+ Each instance of the ambiguous words "hard", "interest", "line", and "serve"
22
+ is tagged with a sense identifier, and supplied with context.
23
+ """
24
+
25
+ import re
26
+ from xml.etree import ElementTree
27
+
28
+ from nltk.corpus.reader.api import *
29
+ from nltk.corpus.reader.util import *
30
+ from nltk.tokenize import *
31
+
32
+
33
+ class SensevalInstance:
34
+ def __init__(self, word, position, context, senses):
35
+ self.word = word
36
+ self.senses = tuple(senses)
37
+ self.position = position
38
+ self.context = context
39
+
40
+ def __repr__(self):
41
+ return "SensevalInstance(word=%r, position=%r, " "context=%r, senses=%r)" % (
42
+ self.word,
43
+ self.position,
44
+ self.context,
45
+ self.senses,
46
+ )
47
+
48
+
49
+ class SensevalCorpusReader(CorpusReader):
50
+ def instances(self, fileids=None):
51
+ return concat(
52
+ [
53
+ SensevalCorpusView(fileid, enc)
54
+ for (fileid, enc) in self.abspaths(fileids, True)
55
+ ]
56
+ )
57
+
58
+ def _entry(self, tree):
59
+ elts = []
60
+ for lexelt in tree.findall("lexelt"):
61
+ for inst in lexelt.findall("instance"):
62
+ sense = inst[0].attrib["senseid"]
63
+ context = [(w.text, w.attrib["pos"]) for w in inst[1]]
64
+ elts.append((sense, context))
65
+ return elts
66
+
67
+
68
+ class SensevalCorpusView(StreamBackedCorpusView):
69
+ def __init__(self, fileid, encoding):
70
+ StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
71
+
72
+ self._word_tokenizer = WhitespaceTokenizer()
73
+ self._lexelt_starts = [0] # list of streampos
74
+ self._lexelts = [None] # list of lexelt names
75
+
76
+ def read_block(self, stream):
77
+ # Decide which lexical element we're in.
78
+ lexelt_num = bisect.bisect_right(self._lexelt_starts, stream.tell()) - 1
79
+ lexelt = self._lexelts[lexelt_num]
80
+
81
+ instance_lines = []
82
+ in_instance = False
83
+ while True:
84
+ line = stream.readline()
85
+ if line == "":
86
+ assert instance_lines == []
87
+ return []
88
+
89
+ # Start of a lexical element?
90
+ if line.lstrip().startswith("<lexelt"):
91
+ lexelt_num += 1
92
+ m = re.search("item=(\"[^\"]+\"|'[^']+')", line)
93
+ assert m is not None # <lexelt> has no 'item=...'
94
+ lexelt = m.group(1)[1:-1]
95
+ if lexelt_num < len(self._lexelts):
96
+ assert lexelt == self._lexelts[lexelt_num]
97
+ else:
98
+ self._lexelts.append(lexelt)
99
+ self._lexelt_starts.append(stream.tell())
100
+
101
+ # Start of an instance?
102
+ if line.lstrip().startswith("<instance"):
103
+ assert instance_lines == []
104
+ in_instance = True
105
+
106
+ # Body of an instance?
107
+ if in_instance:
108
+ instance_lines.append(line)
109
+
110
+ # End of an instance?
111
+ if line.lstrip().startswith("</instance"):
112
+ xml_block = "\n".join(instance_lines)
113
+ xml_block = _fixXML(xml_block)
114
+ inst = ElementTree.fromstring(xml_block)
115
+ return [self._parse_instance(inst, lexelt)]
116
+
117
+ def _parse_instance(self, instance, lexelt):
118
+ senses = []
119
+ context = []
120
+ position = None
121
+ for child in instance:
122
+ if child.tag == "answer":
123
+ senses.append(child.attrib["senseid"])
124
+ elif child.tag == "context":
125
+ context += self._word_tokenizer.tokenize(child.text)
126
+ for cword in child:
127
+ if cword.tag == "compound":
128
+ cword = cword[0] # is this ok to do?
129
+
130
+ if cword.tag == "head":
131
+ # Some santiy checks:
132
+ assert position is None, "head specified twice"
133
+ assert cword.text.strip() or len(cword) == 1
134
+ assert not (cword.text.strip() and len(cword) == 1)
135
+ # Record the position of the head:
136
+ position = len(context)
137
+ # Add on the head word itself:
138
+ if cword.text.strip():
139
+ context.append(cword.text.strip())
140
+ elif cword[0].tag == "wf":
141
+ context.append((cword[0].text, cword[0].attrib["pos"]))
142
+ if cword[0].tail:
143
+ context += self._word_tokenizer.tokenize(cword[0].tail)
144
+ else:
145
+ assert False, "expected CDATA or wf in <head>"
146
+ elif cword.tag == "wf":
147
+ context.append((cword.text, cword.attrib["pos"]))
148
+ elif cword.tag == "s":
149
+ pass # Sentence boundary marker.
150
+
151
+ else:
152
+ print("ACK", cword.tag)
153
+ assert False, "expected CDATA or <wf> or <head>"
154
+ if cword.tail:
155
+ context += self._word_tokenizer.tokenize(cword.tail)
156
+ else:
157
+ assert False, "unexpected tag %s" % child.tag
158
+ return SensevalInstance(lexelt, position, context, senses)
159
+
160
+
161
+ def _fixXML(text):
162
+ """
163
+ Fix the various issues with Senseval pseudo-XML.
164
+ """
165
+ # <~> or <^> => ~ or ^
166
+ text = re.sub(r"<([~\^])>", r"\1", text)
167
+ # fix lone &
168
+ text = re.sub(r"(\s+)\&(\s+)", r"\1&amp;\2", text)
169
+ # fix """
170
+ text = re.sub(r'"""', "'\"'", text)
171
+ # fix <s snum=dd> => <s snum="dd"/>
172
+ text = re.sub(r'(<[^<]*snum=)([^">]+)>', r'\1"\2"/>', text)
173
+ # fix foreign word tag
174
+ text = re.sub(r"<\&frasl>\s*<p[^>]*>", "FRASL", text)
175
+ # remove <&I .>
176
+ text = re.sub(r"<\&I[^>]*>", "", text)
177
+ # fix <{word}>
178
+ text = re.sub(r"<{([^}]+)}>", r"\1", text)
179
+ # remove <@>, <p>, </p>
180
+ text = re.sub(r"<(@|/?p)>", r"", text)
181
+ # remove <&M .> and <&T .> and <&Ms .>
182
+ text = re.sub(r"<&\w+ \.>", r"", text)
183
+ # remove <!DOCTYPE... > lines
184
+ text = re.sub(r"<!DOCTYPE[^>]*>", r"", text)
185
+ # remove <[hi]> and <[/p]> etc
186
+ text = re.sub(r"<\[\/?[^>]+\]*>", r"", text)
187
+ # take the thing out of the brackets: <&hellip;>
188
+ text = re.sub(r"<(\&\w+;)>", r"\1", text)
189
+ # and remove the & for those patterns that aren't regular XML
190
+ text = re.sub(r"&(?!amp|gt|lt|apos|quot)", r"", text)
191
+ # fix 'abc <p="foo"/>' style tags - now <wf pos="foo">abc</wf>
192
+ text = re.sub(
193
+ r'[ \t]*([^<>\s]+?)[ \t]*<p="([^"]*"?)"/>', r' <wf pos="\2">\1</wf>', text
194
+ )
195
+ text = re.sub(r'\s*"\s*<p=\'"\'/>', " <wf pos='\"'>\"</wf>", text)
196
+ return text
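A minimal usage sketch of SensevalCorpusReader, assuming the "senseval" corpus has been installed (e.g. via nltk.download("senseval")):

    # Assumes nltk.download("senseval") has been run.
    from nltk.corpus import senseval

    print(senseval.fileids())            # e.g. ['hard.pos', 'interest.pos', ...]
    inst = senseval.instances("hard.pos")[0]
    print(inst.word, inst.senses, inst.position)
    print(inst.context[:10])             # (word, pos) pairs around the target word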
lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py ADDED
@@ -0,0 +1,125 @@
1
+ # Natural Language Toolkit: Switchboard Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ import re
8
+
9
+ from nltk.corpus.reader.api import *
10
+ from nltk.corpus.reader.util import *
11
+ from nltk.tag import map_tag, str2tuple
12
+
13
+
14
+ class SwitchboardTurn(list):
15
+ """
16
+ A specialized list object used to encode switchboard utterances.
17
+ The elements of the list are the words in the utterance; and two
18
+ attributes, ``speaker`` and ``id``, are provided to retrieve the
19
+ speaker identifier and utterance id. Note that utterance ids
20
+ are only unique within a given discourse.
21
+ """
22
+
23
+ def __init__(self, words, speaker, id):
24
+ list.__init__(self, words)
25
+ self.speaker = speaker
26
+ self.id = int(id)
27
+
28
+ def __repr__(self):
29
+ if len(self) == 0:
30
+ text = ""
31
+ elif isinstance(self[0], tuple):
32
+ text = " ".join("%s/%s" % w for w in self)
33
+ else:
34
+ text = " ".join(self)
35
+ return f"<{self.speaker}.{self.id}: {text!r}>"
36
+
37
+
38
+ class SwitchboardCorpusReader(CorpusReader):
39
+ _FILES = ["tagged"]
40
+ # Use the "tagged" file even for non-tagged data methods, since
41
+ # it's tokenized.
42
+
43
+ def __init__(self, root, tagset=None):
44
+ CorpusReader.__init__(self, root, self._FILES)
45
+ self._tagset = tagset
46
+
47
+ def words(self):
48
+ return StreamBackedCorpusView(self.abspath("tagged"), self._words_block_reader)
49
+
50
+ def tagged_words(self, tagset=None):
51
+ def tagged_words_block_reader(stream):
52
+ return self._tagged_words_block_reader(stream, tagset)
53
+
54
+ return StreamBackedCorpusView(self.abspath("tagged"), tagged_words_block_reader)
55
+
56
+ def turns(self):
57
+ return StreamBackedCorpusView(self.abspath("tagged"), self._turns_block_reader)
58
+
59
+ def tagged_turns(self, tagset=None):
60
+ def tagged_turns_block_reader(stream):
61
+ return self._tagged_turns_block_reader(stream, tagset)
62
+
63
+ return StreamBackedCorpusView(self.abspath("tagged"), tagged_turns_block_reader)
64
+
65
+ def discourses(self):
66
+ return StreamBackedCorpusView(
67
+ self.abspath("tagged"), self._discourses_block_reader
68
+ )
69
+
70
+ def tagged_discourses(self, tagset=False):
71
+ def tagged_discourses_block_reader(stream):
72
+ return self._tagged_discourses_block_reader(stream, tagset)
73
+
74
+ return StreamBackedCorpusView(
75
+ self.abspath("tagged"), tagged_discourses_block_reader
76
+ )
77
+
78
+ def _discourses_block_reader(self, stream):
79
+ # returns at most 1 discourse. (The other methods depend on this.)
80
+ return [
81
+ [
82
+ self._parse_utterance(u, include_tag=False)
83
+ for b in read_blankline_block(stream)
84
+ for u in b.split("\n")
85
+ if u.strip()
86
+ ]
87
+ ]
88
+
89
+ def _tagged_discourses_block_reader(self, stream, tagset=None):
90
+ # returns at most 1 discourse. (The other methods depend on this.)
91
+ return [
92
+ [
93
+ self._parse_utterance(u, include_tag=True, tagset=tagset)
94
+ for b in read_blankline_block(stream)
95
+ for u in b.split("\n")
96
+ if u.strip()
97
+ ]
98
+ ]
99
+
100
+ def _turns_block_reader(self, stream):
101
+ return self._discourses_block_reader(stream)[0]
102
+
103
+ def _tagged_turns_block_reader(self, stream, tagset=None):
104
+ return self._tagged_discourses_block_reader(stream, tagset)[0]
105
+
106
+ def _words_block_reader(self, stream):
107
+ return sum(self._discourses_block_reader(stream)[0], [])
108
+
109
+ def _tagged_words_block_reader(self, stream, tagset=None):
110
+ return sum(self._tagged_discourses_block_reader(stream, tagset)[0], [])
111
+
112
+ _UTTERANCE_RE = re.compile(r"(\w+)\.(\d+)\:\s*(.*)")
113
+ _SEP = "/"
114
+
115
+ def _parse_utterance(self, utterance, include_tag, tagset=None):
116
+ m = self._UTTERANCE_RE.match(utterance)
117
+ if m is None:
118
+ raise ValueError("Bad utterance %r" % utterance)
119
+ speaker, id, text = m.groups()
120
+ words = [str2tuple(s, self._SEP) for s in text.split()]
121
+ if not include_tag:
122
+ words = [w for (w, t) in words]
123
+ elif tagset and tagset != self._tagset:
124
+ words = [(w, map_tag(self._tagset, tagset, t)) for (w, t) in words]
125
+ return SwitchboardTurn(words, speaker, id)
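A minimal usage sketch of SwitchboardCorpusReader, assuming the "switchboard" corpus sample has been installed (e.g. via nltk.download("switchboard")):

    # Assumes nltk.download("switchboard") has been run.
    from nltk.corpus import switchboard

    turn = switchboard.turns()[0]
    print(turn.speaker, turn.id, turn[:5])   # speaker, utterance id, first words
    print(switchboard.tagged_words()[:5])    # (word, tag) tuples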
lib/python3.10/site-packages/nltk/corpus/reader/tagged.py ADDED
@@ -0,0 +1,354 @@
1
+ # Natural Language Toolkit: Tagged Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Jacob Perkins <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ A reader for corpora whose documents contain part-of-speech-tagged words.
12
+ """
13
+
14
+ import os
15
+
16
+ from nltk.corpus.reader.api import *
17
+ from nltk.corpus.reader.timit import read_timit_block
18
+ from nltk.corpus.reader.util import *
19
+ from nltk.tag import map_tag, str2tuple
20
+ from nltk.tokenize import *
21
+
22
+
23
+ class TaggedCorpusReader(CorpusReader):
24
+ """
25
+ Reader for simple part-of-speech tagged corpora. Paragraphs are
26
+ assumed to be split using blank lines. Sentences and words can be
27
+ tokenized using the default tokenizers, or by custom tokenizers
28
+ specified as parameters to the constructor. Words are parsed
29
+ using ``nltk.tag.str2tuple``. By default, ``'/'`` is used as the
30
+ separator. I.e., words should have the form::
31
+
32
+ word1/tag1 word2/tag2 word3/tag3 ...
33
+
34
+ But custom separators may be specified as parameters to the
35
+ constructor. Part of speech tags are case-normalized to upper
36
+ case.
37
+ """
38
+
39
+ def __init__(
40
+ self,
41
+ root,
42
+ fileids,
43
+ sep="/",
44
+ word_tokenizer=WhitespaceTokenizer(),
45
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
46
+ para_block_reader=read_blankline_block,
47
+ encoding="utf8",
48
+ tagset=None,
49
+ ):
50
+ """
51
+ Construct a new Tagged Corpus reader for a set of documents
52
+ located at the given root directory. Example usage:
53
+
54
+ >>> root = '/...path to corpus.../'
55
+ >>> reader = TaggedCorpusReader(root, '.*', '.txt') # doctest: +SKIP
56
+
57
+ :param root: The root directory for this corpus.
58
+ :param fileids: A list or regexp specifying the fileids in this corpus.
59
+ """
60
+ CorpusReader.__init__(self, root, fileids, encoding)
61
+ self._sep = sep
62
+ self._word_tokenizer = word_tokenizer
63
+ self._sent_tokenizer = sent_tokenizer
64
+ self._para_block_reader = para_block_reader
65
+ self._tagset = tagset
66
+
67
+ def words(self, fileids=None):
68
+ """
69
+ :return: the given file(s) as a list of words
70
+ and punctuation symbols.
71
+ :rtype: list(str)
72
+ """
73
+ return concat(
74
+ [
75
+ TaggedCorpusView(
76
+ fileid,
77
+ enc,
78
+ False,
79
+ False,
80
+ False,
81
+ self._sep,
82
+ self._word_tokenizer,
83
+ self._sent_tokenizer,
84
+ self._para_block_reader,
85
+ None,
86
+ )
87
+ for (fileid, enc) in self.abspaths(fileids, True)
88
+ ]
89
+ )
90
+
91
+ def sents(self, fileids=None):
92
+ """
93
+ :return: the given file(s) as a list of
94
+ sentences or utterances, each encoded as a list of word
95
+ strings.
96
+ :rtype: list(list(str))
97
+ """
98
+ return concat(
99
+ [
100
+ TaggedCorpusView(
101
+ fileid,
102
+ enc,
103
+ False,
104
+ True,
105
+ False,
106
+ self._sep,
107
+ self._word_tokenizer,
108
+ self._sent_tokenizer,
109
+ self._para_block_reader,
110
+ None,
111
+ )
112
+ for (fileid, enc) in self.abspaths(fileids, True)
113
+ ]
114
+ )
115
+
116
+ def paras(self, fileids=None):
117
+ """
118
+ :return: the given file(s) as a list of
119
+ paragraphs, each encoded as a list of sentences, which are
120
+ in turn encoded as lists of word strings.
121
+ :rtype: list(list(list(str)))
122
+ """
123
+ return concat(
124
+ [
125
+ TaggedCorpusView(
126
+ fileid,
127
+ enc,
128
+ False,
129
+ True,
130
+ True,
131
+ self._sep,
132
+ self._word_tokenizer,
133
+ self._sent_tokenizer,
134
+ self._para_block_reader,
135
+ None,
136
+ )
137
+ for (fileid, enc) in self.abspaths(fileids, True)
138
+ ]
139
+ )
140
+
141
+ def tagged_words(self, fileids=None, tagset=None):
142
+ """
143
+ :return: the given file(s) as a list of tagged
144
+ words and punctuation symbols, encoded as tuples
145
+ ``(word,tag)``.
146
+ :rtype: list(tuple(str,str))
147
+ """
148
+ if tagset and tagset != self._tagset:
149
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
150
+ else:
151
+ tag_mapping_function = None
152
+ return concat(
153
+ [
154
+ TaggedCorpusView(
155
+ fileid,
156
+ enc,
157
+ True,
158
+ False,
159
+ False,
160
+ self._sep,
161
+ self._word_tokenizer,
162
+ self._sent_tokenizer,
163
+ self._para_block_reader,
164
+ tag_mapping_function,
165
+ )
166
+ for (fileid, enc) in self.abspaths(fileids, True)
167
+ ]
168
+ )
169
+
170
+ def tagged_sents(self, fileids=None, tagset=None):
171
+ """
172
+ :return: the given file(s) as a list of
173
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
174
+
175
+ :rtype: list(list(tuple(str,str)))
176
+ """
177
+ if tagset and tagset != self._tagset:
178
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
179
+ else:
180
+ tag_mapping_function = None
181
+ return concat(
182
+ [
183
+ TaggedCorpusView(
184
+ fileid,
185
+ enc,
186
+ True,
187
+ True,
188
+ False,
189
+ self._sep,
190
+ self._word_tokenizer,
191
+ self._sent_tokenizer,
192
+ self._para_block_reader,
193
+ tag_mapping_function,
194
+ )
195
+ for (fileid, enc) in self.abspaths(fileids, True)
196
+ ]
197
+ )
198
+
199
+ def tagged_paras(self, fileids=None, tagset=None):
200
+ """
201
+ :return: the given file(s) as a list of
202
+ paragraphs, each encoded as a list of sentences, which are
203
+ in turn encoded as lists of ``(word,tag)`` tuples.
204
+ :rtype: list(list(list(tuple(str,str))))
205
+ """
206
+ if tagset and tagset != self._tagset:
207
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
208
+ else:
209
+ tag_mapping_function = None
210
+ return concat(
211
+ [
212
+ TaggedCorpusView(
213
+ fileid,
214
+ enc,
215
+ True,
216
+ True,
217
+ True,
218
+ self._sep,
219
+ self._word_tokenizer,
220
+ self._sent_tokenizer,
221
+ self._para_block_reader,
222
+ tag_mapping_function,
223
+ )
224
+ for (fileid, enc) in self.abspaths(fileids, True)
225
+ ]
226
+ )
227
+
228
+
229
+ class CategorizedTaggedCorpusReader(CategorizedCorpusReader, TaggedCorpusReader):
230
+ """
231
+ A reader for part-of-speech tagged corpora whose documents are
232
+ divided into categories based on their file identifiers.
233
+ """
234
+
235
+ def __init__(self, *args, **kwargs):
236
+ """
237
+ Initialize the corpus reader. Categorization arguments
238
+ (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
239
+ the ``CategorizedCorpusReader`` constructor. The remaining arguments
240
+ are passed to the ``TaggedCorpusReader``.
241
+ """
242
+ CategorizedCorpusReader.__init__(self, kwargs)
243
+ TaggedCorpusReader.__init__(self, *args, **kwargs)
244
+
245
+ def tagged_words(self, fileids=None, categories=None, tagset=None):
246
+ return super().tagged_words(self._resolve(fileids, categories), tagset)
247
+
248
+ def tagged_sents(self, fileids=None, categories=None, tagset=None):
249
+ return super().tagged_sents(self._resolve(fileids, categories), tagset)
250
+
251
+ def tagged_paras(self, fileids=None, categories=None, tagset=None):
252
+ return super().tagged_paras(self._resolve(fileids, categories), tagset)
253
+
254
+
255
+ class TaggedCorpusView(StreamBackedCorpusView):
256
+ """
257
+ A specialized corpus view for tagged documents. It can be
258
+ customized via flags to divide the tagged corpus documents up by
259
+ sentence or paragraph, and to include or omit part of speech tags.
260
+ ``TaggedCorpusView`` objects are typically created by
261
+ ``TaggedCorpusReader`` (not directly by nltk users).
262
+ """
263
+
264
+ def __init__(
265
+ self,
266
+ corpus_file,
267
+ encoding,
268
+ tagged,
269
+ group_by_sent,
270
+ group_by_para,
271
+ sep,
272
+ word_tokenizer,
273
+ sent_tokenizer,
274
+ para_block_reader,
275
+ tag_mapping_function=None,
276
+ ):
277
+ self._tagged = tagged
278
+ self._group_by_sent = group_by_sent
279
+ self._group_by_para = group_by_para
280
+ self._sep = sep
281
+ self._word_tokenizer = word_tokenizer
282
+ self._sent_tokenizer = sent_tokenizer
283
+ self._para_block_reader = para_block_reader
284
+ self._tag_mapping_function = tag_mapping_function
285
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
286
+
287
+ def read_block(self, stream):
288
+ """Reads one paragraph at a time."""
289
+ block = []
290
+ for para_str in self._para_block_reader(stream):
291
+ para = []
292
+ for sent_str in self._sent_tokenizer.tokenize(para_str):
293
+ sent = [
294
+ str2tuple(s, self._sep)
295
+ for s in self._word_tokenizer.tokenize(sent_str)
296
+ ]
297
+ if self._tag_mapping_function:
298
+ sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent]
299
+ if not self._tagged:
300
+ sent = [w for (w, t) in sent]
301
+ if self._group_by_sent:
302
+ para.append(sent)
303
+ else:
304
+ para.extend(sent)
305
+ if self._group_by_para:
306
+ block.append(para)
307
+ else:
308
+ block.extend(para)
309
+ return block
310
+
311
+
312
+ # needs to implement simplified tags
313
+ class MacMorphoCorpusReader(TaggedCorpusReader):
314
+ """
315
+ A corpus reader for the MAC_MORPHO corpus. Each line contains a
316
+ single tagged word, using '_' as a separator. Sentence boundaries
317
+ are based on the end-sentence tag ('_.'). Paragraph information
318
+ is not included in the corpus, so each paragraph returned by
319
+ ``self.paras()`` and ``self.tagged_paras()`` contains a single
320
+ sentence.
321
+ """
322
+
323
+ def __init__(self, root, fileids, encoding="utf8", tagset=None):
324
+ TaggedCorpusReader.__init__(
325
+ self,
326
+ root,
327
+ fileids,
328
+ sep="_",
329
+ word_tokenizer=LineTokenizer(),
330
+ sent_tokenizer=RegexpTokenizer(".*\n"),
331
+ para_block_reader=self._read_block,
332
+ encoding=encoding,
333
+ tagset=tagset,
334
+ )
335
+
336
+ def _read_block(self, stream):
337
+ return read_regexp_block(stream, r".*", r".*_\.")
338
+
339
+
340
+ class TimitTaggedCorpusReader(TaggedCorpusReader):
341
+ """
342
+ A corpus reader for tagged sentences that are included in the TIMIT corpus.
343
+ """
344
+
345
+ def __init__(self, *args, **kwargs):
346
+ TaggedCorpusReader.__init__(
347
+ self, para_block_reader=read_timit_block, *args, **kwargs
348
+ )
349
+
350
+ def paras(self):
351
+ raise NotImplementedError("use sents() instead")
352
+
353
+ def tagged_paras(self):
354
+ raise NotImplementedError("use tagged_sents() instead")
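A self-contained sketch of TaggedCorpusReader on a tiny hand-made corpus; the directory and file name below are made up for illustration:

    import os, tempfile
    from nltk.corpus.reader.tagged import TaggedCorpusReader

    # Write a two-paragraph toy corpus in word/tag format.
    root = tempfile.mkdtemp()
    with open(os.path.join(root, "example.pos"), "w", encoding="utf8") as f:
        f.write("The/DT cat/NN sat/VBD ./.\n\nIt/PRP purred/VBD ./.\n")

    reader = TaggedCorpusReader(root, r".*\.pos")
    print(reader.words())          # ['The', 'cat', 'sat', '.', 'It', 'purred', '.']
    print(reader.tagged_sents())   # [[('The', 'DT'), ('cat', 'NN'), ...], ...]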
lib/python3.10/site-packages/nltk/corpus/reader/twitter.py ADDED
@@ -0,0 +1,136 @@
1
+ # Natural Language Toolkit: Twitter Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A reader for corpora that consist of Tweets. It is assumed that the Tweets
10
+ have been serialised into line-delimited JSON.
11
+ """
12
+
13
+ import json
14
+ import os
15
+
16
+ from nltk.corpus.reader.api import CorpusReader
17
+ from nltk.corpus.reader.util import StreamBackedCorpusView, ZipFilePathPointer, concat
18
+ from nltk.tokenize import TweetTokenizer
19
+
20
+
21
+ class TwitterCorpusReader(CorpusReader):
22
+ r"""
23
+ Reader for corpora that consist of Tweets represented as a list of line-delimited JSON.
24
+
25
+ Individual Tweets can be tokenized using the default tokenizer, or by a
26
+ custom tokenizer specified as a parameter to the constructor.
27
+
28
+ Construct a new Tweet corpus reader for a set of documents
29
+ located at the given root directory.
30
+
31
+ If you made your own tweet collection in a directory called
32
+ `twitter-files`, then you can initialise the reader as::
33
+
34
+ from nltk.corpus import TwitterCorpusReader
35
+ reader = TwitterCorpusReader('/path/to/twitter-files', '.*\.json')
36
+
37
+ However, the recommended approach is to set the relevant directory as the
38
+ value of the environmental variable `TWITTER`, and then invoke the reader
39
+ as follows::
40
+
41
+ root = os.environ['TWITTER']
42
+ reader = TwitterCorpusReader(root, '.*\.json')
43
+
44
+ If you want to work directly with the raw Tweets, the `json` library can
45
+ be used::
46
+
47
+ import json
48
+ for tweet in reader.docs():
49
+ print(json.dumps(tweet, indent=1, sort_keys=True))
50
+
51
+ """
52
+
53
+ CorpusView = StreamBackedCorpusView
54
+ """
55
+ The corpus view class used by this reader.
56
+ """
57
+
58
+ def __init__(
59
+ self, root, fileids=None, word_tokenizer=TweetTokenizer(), encoding="utf8"
60
+ ):
61
+ """
62
+ :param root: The root directory for this corpus.
63
+ :param fileids: A list or regexp specifying the fileids in this corpus.
64
+ :param word_tokenizer: Tokenizer for breaking the text of Tweets into
65
+ smaller units, including but not limited to words.
66
+ """
67
+ CorpusReader.__init__(self, root, fileids, encoding)
68
+
69
+ for path in self.abspaths(self._fileids):
70
+ if isinstance(path, ZipFilePathPointer):
71
+ pass
72
+ elif os.path.getsize(path) == 0:
73
+ raise ValueError(f"File {path} is empty")
74
+ """Check that all user-created corpus files are non-empty."""
75
+
76
+ self._word_tokenizer = word_tokenizer
77
+
78
+ def docs(self, fileids=None):
79
+ """
80
+ Returns the full Tweet objects, as specified by `Twitter
81
+ documentation on Tweets
82
+ <https://dev.twitter.com/docs/platform-objects/tweets>`_
83
+
84
+ :return: the given file(s) as a list of dictionaries deserialised
85
+ from JSON.
86
+ :rtype: list(dict)
87
+ """
88
+ return concat(
89
+ [
90
+ self.CorpusView(path, self._read_tweets, encoding=enc)
91
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
92
+ ]
93
+ )
94
+
95
+ def strings(self, fileids=None):
96
+ """
97
+ Returns only the text content of Tweets in the file(s)
98
+
99
+ :return: the given file(s) as a list of Tweets.
100
+ :rtype: list(str)
101
+ """
102
+ fulltweets = self.docs(fileids)
103
+ tweets = []
104
+ for jsono in fulltweets:
105
+ try:
106
+ text = jsono["text"]
107
+ if isinstance(text, bytes):
108
+ text = text.decode(self.encoding)
109
+ tweets.append(text)
110
+ except KeyError:
111
+ pass
112
+ return tweets
113
+
114
+ def tokenized(self, fileids=None):
115
+ """
116
+ :return: the given file(s) as a list of the text content of Tweets,
117
+ each tokenized into words, screen names, hashtags, URLs and punctuation symbols.
118
+
119
+ :rtype: list(list(str))
120
+ """
121
+ tweets = self.strings(fileids)
122
+ tokenizer = self._word_tokenizer
123
+ return [tokenizer.tokenize(t) for t in tweets]
124
+
125
+ def _read_tweets(self, stream):
126
+ """
127
+ Assumes that each line in ``stream`` is a JSON-serialised object.
128
+ """
129
+ tweets = []
130
+ for i in range(10):
131
+ line = stream.readline()
132
+ if not line:
133
+ return tweets
134
+ tweet = json.loads(line)
135
+ tweets.append(tweet)
136
+ return tweets
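A self-contained sketch of TwitterCorpusReader over a hand-made directory of line-delimited JSON; the directory and file name below are made up for illustration:

    import json, os, tempfile
    from nltk.corpus.reader.twitter import TwitterCorpusReader

    # Write one Tweet-like object per line, as the reader expects.
    root = tempfile.mkdtemp()
    with open(os.path.join(root, "tiny.json"), "w", encoding="utf8") as f:
        f.write(json.dumps({"text": "Just tried the new #NLTK reader :-)"}) + "\n")

    reader = TwitterCorpusReader(root, r".*\.json")
    print(reader.strings())      # raw tweet texts
    print(reader.tokenized())    # tokenized with TweetTokenizer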
lib/python3.10/site-packages/nltk/corpus/reader/udhr.py ADDED
@@ -0,0 +1,75 @@
1
+ """
2
+ UDHR corpus reader. It mostly deals with encodings.
3
+ """
4
+
5
+ from nltk.corpus.reader.plaintext import PlaintextCorpusReader
6
+ from nltk.corpus.reader.util import find_corpus_fileids
7
+
8
+
9
+ class UdhrCorpusReader(PlaintextCorpusReader):
10
+
11
+ ENCODINGS = [
12
+ (".*-Latin1$", "latin-1"),
13
+ (".*-Hebrew$", "hebrew"),
14
+ (".*-Arabic$", "cp1256"),
15
+ ("Czech_Cesky-UTF8", "cp1250"), # yeah
16
+ ("Polish-Latin2", "cp1250"),
17
+ ("Polish_Polski-Latin2", "cp1250"),
18
+ (".*-Cyrillic$", "cyrillic"),
19
+ (".*-SJIS$", "SJIS"),
20
+ (".*-GB2312$", "GB2312"),
21
+ (".*-Latin2$", "ISO-8859-2"),
22
+ (".*-Greek$", "greek"),
23
+ (".*-UTF8$", "utf-8"),
24
+ ("Hungarian_Magyar-Unicode", "utf-16-le"),
25
+ ("Amahuaca", "latin1"),
26
+ ("Turkish_Turkce-Turkish", "latin5"),
27
+ ("Lithuanian_Lietuviskai-Baltic", "latin4"),
28
+ ("Japanese_Nihongo-EUC", "EUC-JP"),
29
+ ("Japanese_Nihongo-JIS", "iso2022_jp"),
30
+ ("Chinese_Mandarin-HZ", "hz"),
31
+ (r"Abkhaz\-Cyrillic\+Abkh", "cp1251"),
32
+ ]
33
+
34
+ SKIP = {
35
+ # The following files are not fully decodable because they
36
+ # were truncated at wrong bytes:
37
+ "Burmese_Myanmar-UTF8",
38
+ "Japanese_Nihongo-JIS",
39
+ "Chinese_Mandarin-HZ",
40
+ "Chinese_Mandarin-UTF8",
41
+ "Gujarati-UTF8",
42
+ "Hungarian_Magyar-Unicode",
43
+ "Lao-UTF8",
44
+ "Magahi-UTF8",
45
+ "Marathi-UTF8",
46
+ "Tamil-UTF8",
47
+ # Unfortunately, encodings required for reading
48
+ # the following files are not supported by Python:
49
+ "Vietnamese-VPS",
50
+ "Vietnamese-VIQR",
51
+ "Vietnamese-TCVN",
52
+ "Magahi-Agra",
53
+ "Bhojpuri-Agra",
54
+ "Esperanto-T61", # latin3 raises an exception
55
+ # The following files are encoded for specific fonts:
56
+ "Burmese_Myanmar-WinResearcher",
57
+ "Armenian-DallakHelv",
58
+ "Tigrinya_Tigrigna-VG2Main",
59
+ "Amharic-Afenegus6..60375", # ?
60
+ "Navaho_Dine-Navajo-Navaho-font",
61
+ # What are these?
62
+ "Azeri_Azerbaijani_Cyrillic-Az.Times.Cyr.Normal0117",
63
+ "Azeri_Azerbaijani_Latin-Az.Times.Lat0117",
64
+ # The following files are unintended:
65
+ "Czech-Latin2-err",
66
+ "Russian_Russky-UTF8~",
67
+ }
68
+
69
+ def __init__(self, root="udhr"):
70
+ fileids = find_corpus_fileids(root, r"(?!README|\.).*")
71
+ super().__init__(
72
+ root,
73
+ [fileid for fileid in fileids if fileid not in self.SKIP],
74
+ encoding=self.ENCODINGS,
75
+ )
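A minimal usage sketch of UdhrCorpusReader, assuming the "udhr" corpus has been installed (e.g. via nltk.download("udhr")); the fileid below comes from that distribution:

    # Assumes nltk.download("udhr") has been run.
    from nltk.corpus import udhr

    print(len(udhr.fileids()))               # one file per language/encoding pair
    print(udhr.words("English-Latin1")[:8])  # decoded using the ENCODINGS table above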
lib/python3.10/site-packages/nltk/corpus/reader/util.py ADDED
@@ -0,0 +1,867 @@
1
+ # Natural Language Toolkit: Corpus Reader Utilities
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import bisect
10
+ import os
11
+ import pickle
12
+ import re
13
+ import tempfile
14
+ from functools import reduce
15
+ from xml.etree import ElementTree
16
+
17
+ from nltk.data import (
18
+ FileSystemPathPointer,
19
+ PathPointer,
20
+ SeekableUnicodeStreamReader,
21
+ ZipFilePathPointer,
22
+ )
23
+ from nltk.internals import slice_bounds
24
+ from nltk.tokenize import wordpunct_tokenize
25
+ from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence
26
+
27
+ ######################################################################
28
+ # { Corpus View
29
+ ######################################################################
30
+
31
+
32
+ class StreamBackedCorpusView(AbstractLazySequence):
33
+ """
34
+ A 'view' of a corpus file, which acts like a sequence of tokens:
35
+ it can be accessed by index, iterated over, etc. However, the
36
+ tokens are only constructed as-needed -- the entire corpus is
37
+ never stored in memory at once.
38
+
39
+ The constructor to ``StreamBackedCorpusView`` takes two arguments:
40
+ a corpus fileid (specified as a string or as a ``PathPointer``);
41
+ and a block reader. A "block reader" is a function that reads
42
+ zero or more tokens from a stream, and returns them as a list. A
43
+ very simple example of a block reader is:
44
+
45
+ >>> def simple_block_reader(stream):
46
+ ... return stream.readline().split()
47
+
48
+ This simple block reader reads a single line at a time, and
49
+ returns a single token (consisting of a string) for each
50
+ whitespace-separated substring on the line.
51
+
52
+ When deciding how to define the block reader for a given
53
+ corpus, careful consideration should be given to the size of
54
+ blocks handled by the block reader. Smaller block sizes will
55
+ increase the memory requirements of the corpus view's internal
56
+ data structures (by 2 integers per block). On the other hand,
57
+ larger block sizes may decrease performance for random access to
58
+ the corpus. (But note that larger block sizes will *not*
59
+ decrease performance for iteration.)
60
+
61
+ Internally, ``CorpusView`` maintains a partial mapping from token
62
+ index to file position, with one entry per block. When a token
63
+ with a given index *i* is requested, the ``CorpusView`` constructs
64
+ it as follows:
65
+
66
+ 1. First, it searches the toknum/filepos mapping for the token
67
+ index closest to (but less than or equal to) *i*.
68
+
69
+ 2. Then, starting at the file position corresponding to that
70
+ index, it reads one block at a time using the block reader
71
+ until it reaches the requested token.
72
+
73
+ The toknum/filepos mapping is created lazily: it is initially
74
+ empty, but every time a new block is read, the block's
75
+ initial token is added to the mapping. (Thus, the toknum/filepos
76
+ map has one entry per block.)
77
+
78
+ In order to increase efficiency for random access patterns that
79
+ have high degrees of locality, the corpus view may cache one or
80
+ more blocks.
81
+
82
+ :note: Each ``CorpusView`` object internally maintains an open file
83
+ object for its underlying corpus file. This file should be
84
+ automatically closed when the ``CorpusView`` is garbage collected,
85
+ but if you wish to close it manually, use the ``close()``
86
+ method. If you access a ``CorpusView``'s items after it has been
87
+ closed, the file object will be automatically re-opened.
88
+
89
+ :warning: If the contents of the file are modified during the
90
+ lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior
91
+ is undefined.
92
+
93
+ :warning: If a unicode encoding is specified when constructing a
94
+ ``CorpusView``, then the block reader may only call
95
+ ``stream.seek()`` with offsets that have been returned by
96
+ ``stream.tell()``; in particular, calling ``stream.seek()`` with
97
+ relative offsets, or with offsets based on string lengths, may
98
+ lead to incorrect behavior.
99
+
100
+ :ivar _block_reader: The function used to read
101
+ a single block from the underlying file stream.
102
+ :ivar _toknum: A list containing the token index of each block
103
+ that has been processed. In particular, ``_toknum[i]`` is the
104
+ token index of the first token in block ``i``. Together
105
+ with ``_filepos``, this forms a partial mapping between token
106
+ indices and file positions.
107
+ :ivar _filepos: A list containing the file position of each block
108
+ that has been processed. In particular, ``_toknum[i]`` is the
109
+ file position of the first character in block ``i``. Together
110
+ with ``_toknum``, this forms a partial mapping between token
111
+ indices and file positions.
112
+ :ivar _stream: The stream used to access the underlying corpus file.
113
+ :ivar _len: The total number of tokens in the corpus, if known;
114
+ or None, if the number of tokens is not yet known.
115
+ :ivar _eofpos: The character position of the last character in the
116
+ file. This is calculated when the corpus view is initialized,
117
+ and is used to decide when the end of file has been reached.
118
+ :ivar _cache: A cache of the most recently read block. It
119
+ is encoded as a tuple (start_toknum, end_toknum, tokens), where
120
+ start_toknum is the token index of the first token in the block;
121
+ end_toknum is the token index of the first token not in the
122
+ block; and tokens is a list of the tokens in the block.
123
+ """
124
+
125
+ def __init__(self, fileid, block_reader=None, startpos=0, encoding="utf8"):
126
+ """
127
+ Create a new corpus view, based on the file ``fileid``, and
128
+ read with ``block_reader``. See the class documentation
129
+ for more information.
130
+
131
+ :param fileid: The path to the file that is read by this
132
+ corpus view. ``fileid`` can either be a string or a
133
+ ``PathPointer``.
134
+
135
+ :param startpos: The file position at which the view will
136
+ start reading. This can be used to skip over preface
137
+ sections.
138
+
139
+ :param encoding: The unicode encoding that should be used to
140
+ read the file's contents. If no encoding is specified,
141
+ then the file's contents will be read as a non-unicode
142
+ string (i.e., a str).
143
+ """
144
+ if block_reader:
145
+ self.read_block = block_reader
146
+ # Initialize our toknum/filepos mapping.
147
+ self._toknum = [0]
148
+ self._filepos = [startpos]
149
+ self._encoding = encoding
150
+ # We don't know our length (number of tokens) yet.
151
+ self._len = None
152
+
153
+ self._fileid = fileid
154
+ self._stream = None
155
+
156
+ self._current_toknum = None
157
+ """This variable is set to the index of the next token that
158
+ will be read, immediately before ``self.read_block()`` is
159
+ called. This is provided for the benefit of the block
160
+ reader, which under rare circumstances may need to know
161
+ the current token number."""
162
+
163
+ self._current_blocknum = None
164
+ """This variable is set to the index of the next block that
165
+ will be read, immediately before ``self.read_block()`` is
166
+ called. This is provided for the benefit of the block
167
+ reader, which under rare circumstances may need to know
168
+ the current block number."""
169
+
170
+ # Find the length of the file.
171
+ try:
172
+ if isinstance(self._fileid, PathPointer):
173
+ self._eofpos = self._fileid.file_size()
174
+ else:
175
+ self._eofpos = os.stat(self._fileid).st_size
176
+ except Exception as exc:
177
+ raise ValueError(f"Unable to open or access {fileid!r} -- {exc}") from exc
178
+
179
+ # Maintain a cache of the most recently read block, to
180
+ # increase efficiency of random access.
181
+ self._cache = (-1, -1, None)
182
+
183
+ fileid = property(
184
+ lambda self: self._fileid,
185
+ doc="""
186
+ The fileid of the file that is accessed by this view.
187
+
188
+ :type: str or PathPointer""",
189
+ )
190
+
191
+ def read_block(self, stream):
192
+ """
193
+ Read a block from the input stream.
194
+
195
+ :return: a block of tokens from the input stream
196
+ :rtype: list(any)
197
+ :param stream: an input stream
198
+ :type stream: stream
199
+ """
200
+ raise NotImplementedError("Abstract Method")
201
+
202
+ def _open(self):
203
+ """
204
+ Open the file stream associated with this corpus view. This
205
+ will be called performed if any value is read from the view
206
+ while its file stream is closed.
207
+ """
208
+ if isinstance(self._fileid, PathPointer):
209
+ self._stream = self._fileid.open(self._encoding)
210
+ elif self._encoding:
211
+ self._stream = SeekableUnicodeStreamReader(
212
+ open(self._fileid, "rb"), self._encoding
213
+ )
214
+ else:
215
+ self._stream = open(self._fileid, "rb")
216
+
217
+ def close(self):
218
+ """
219
+ Close the file stream associated with this corpus view. This
220
+ can be useful if you are worried about running out of file
221
+ handles (although the stream should automatically be closed
222
+ upon garbage collection of the corpus view). If the corpus
223
+ view is accessed after it is closed, it will be automatically
224
+ re-opened.
225
+ """
226
+ if self._stream is not None:
227
+ self._stream.close()
228
+ self._stream = None
229
+
230
+ def __enter__(self):
231
+ return self
232
+
233
+ def __exit__(self, type, value, traceback):
234
+ self.close()
235
+
236
+ def __len__(self):
237
+ if self._len is None:
238
+ # iterate_from() sets self._len when it reaches the end
239
+ # of the file:
240
+ for tok in self.iterate_from(self._toknum[-1]):
241
+ pass
242
+ return self._len
243
+
244
+ def __getitem__(self, i):
245
+ if isinstance(i, slice):
246
+ start, stop = slice_bounds(self, i)
247
+ # Check if it's in the cache.
248
+ offset = self._cache[0]
249
+ if offset <= start and stop <= self._cache[1]:
250
+ return self._cache[2][start - offset : stop - offset]
251
+ # Construct & return the result.
252
+ return LazySubsequence(self, start, stop)
253
+ else:
254
+ # Handle negative indices
255
+ if i < 0:
256
+ i += len(self)
257
+ if i < 0:
258
+ raise IndexError("index out of range")
259
+ # Check if it's in the cache.
260
+ offset = self._cache[0]
261
+ if offset <= i < self._cache[1]:
262
+ return self._cache[2][i - offset]
263
+ # Use iterate_from to extract it.
264
+ try:
265
+ return next(self.iterate_from(i))
266
+ except StopIteration as e:
267
+ raise IndexError("index out of range") from e
268
+
269
+ # If we wanted to be thread-safe, then this method would need to
270
+ # do some locking.
271
+ def iterate_from(self, start_tok):
272
+ # Start by feeding from the cache, if possible.
273
+ if self._cache[0] <= start_tok < self._cache[1]:
274
+ for tok in self._cache[2][start_tok - self._cache[0] :]:
275
+ yield tok
276
+ start_tok += 1
277
+
278
+ # Decide where in the file we should start. If `start` is in
279
+ # our mapping, then we can jump straight to the correct block;
280
+ # otherwise, start at the last block we've processed.
281
+ if start_tok < self._toknum[-1]:
282
+ block_index = bisect.bisect_right(self._toknum, start_tok) - 1
283
+ toknum = self._toknum[block_index]
284
+ filepos = self._filepos[block_index]
285
+ else:
286
+ block_index = len(self._toknum) - 1
287
+ toknum = self._toknum[-1]
288
+ filepos = self._filepos[-1]
289
+
290
+ # Open the stream, if it's not open already.
291
+ if self._stream is None:
292
+ self._open()
293
+
294
+ # If the file is empty, the while loop will never run.
295
+ # This *seems* to be all the state we need to set:
296
+ if self._eofpos == 0:
297
+ self._len = 0
298
+
299
+ # Each iteration through this loop, we read a single block
300
+ # from the stream.
301
+ while filepos < self._eofpos:
302
+ # Read the next block.
303
+ self._stream.seek(filepos)
304
+ self._current_toknum = toknum
305
+ self._current_blocknum = block_index
306
+ tokens = self.read_block(self._stream)
307
+ assert isinstance(tokens, (tuple, list, AbstractLazySequence)), (
308
+ "block reader %s() should return list or tuple."
309
+ % self.read_block.__name__
310
+ )
311
+ num_toks = len(tokens)
312
+ new_filepos = self._stream.tell()
313
+ assert (
314
+ new_filepos > filepos
315
+ ), "block reader %s() should consume at least 1 byte (filepos=%d)" % (
316
+ self.read_block.__name__,
317
+ filepos,
318
+ )
319
+
320
+ # Update our cache.
321
+ self._cache = (toknum, toknum + num_toks, list(tokens))
322
+
323
+ # Update our mapping.
324
+ assert toknum <= self._toknum[-1]
325
+ if num_toks > 0:
326
+ block_index += 1
327
+ if toknum == self._toknum[-1]:
328
+ assert new_filepos > self._filepos[-1] # monotonic!
329
+ self._filepos.append(new_filepos)
330
+ self._toknum.append(toknum + num_toks)
331
+ else:
332
+ # Check for consistency:
333
+ assert (
334
+ new_filepos == self._filepos[block_index]
335
+ ), "inconsistent block reader (num chars read)"
336
+ assert (
337
+ toknum + num_toks == self._toknum[block_index]
338
+ ), "inconsistent block reader (num tokens returned)"
339
+
340
+ # If we reached the end of the file, then update self._len
341
+ if new_filepos == self._eofpos:
342
+ self._len = toknum + num_toks
343
+ # Generate the tokens in this block (but skip any tokens
344
+ # before start_tok). Note that between yields, our state
345
+ # may be modified.
346
+ for tok in tokens[max(0, start_tok - toknum) :]:
347
+ yield tok
348
+ # If we're at the end of the file, then we're done.
349
+ assert new_filepos <= self._eofpos
350
+ if new_filepos == self._eofpos:
351
+ break
352
+ # Update our indices
353
+ toknum += num_toks
354
+ filepos = new_filepos
355
+
356
+ # If we reach this point, then we should know our length.
357
+ assert self._len is not None
358
+ # Enforce closing of stream once we reached end of file
359
+ # We should have reached EOF once we're out of the while loop.
360
+ self.close()
361
+
362
+ # Use concat for these, so we can use a ConcatenatedCorpusView
363
+ # when possible.
364
+ def __add__(self, other):
365
+ return concat([self, other])
366
+
367
+ def __radd__(self, other):
368
+ return concat([other, self])
369
+
370
+ def __mul__(self, count):
371
+ return concat([self] * count)
372
+
373
+ def __rmul__(self, count):
374
+ return concat([self] * count)
375
+
376
+
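A self-contained sketch of the block-reader contract described in the class docstring above; the temporary file is made up for illustration:

    import tempfile
    from nltk.corpus.reader.util import StreamBackedCorpusView

    def line_block_reader(stream):
        # One block per line: the line's whitespace-separated tokens.
        return stream.readline().split()

    tmp = tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False)
    tmp.write("one two three\nfour five\n")
    tmp.close()

    view = StreamBackedCorpusView(tmp.name, line_block_reader)
    print(len(view))   # 5
    print(view[3])     # 'four', located via the lazy toknum/filepos mapping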
377
+ class ConcatenatedCorpusView(AbstractLazySequence):
378
+ """
379
+ A 'view' of a corpus file that joins together one or more
380
+ ``StreamBackedCorpusViews<StreamBackedCorpusView>``. At most
381
+ one file handle is left open at any time.
382
+ """
383
+
384
+ def __init__(self, corpus_views):
385
+ self._pieces = corpus_views
386
+ """A list of the corpus subviews that make up this
387
+ concatenation."""
388
+
389
+ self._offsets = [0]
390
+ """A list of offsets, indicating the index at which each
391
+ subview begins. In particular::
392
+ offsets[i] = sum([len(p) for p in pieces[:i]])"""
393
+
394
+ self._open_piece = None
395
+ """The most recently accessed corpus subview (or None).
396
+ Before a new subview is accessed, this subview will be closed."""
397
+
398
+ def __len__(self):
399
+ if len(self._offsets) <= len(self._pieces):
400
+ # Iterate to the end of the corpus.
401
+ for tok in self.iterate_from(self._offsets[-1]):
402
+ pass
403
+
404
+ return self._offsets[-1]
405
+
406
+ def close(self):
407
+ for piece in self._pieces:
408
+ piece.close()
409
+
410
+ def iterate_from(self, start_tok):
411
+ piecenum = bisect.bisect_right(self._offsets, start_tok) - 1
412
+
413
+ while piecenum < len(self._pieces):
414
+ offset = self._offsets[piecenum]
415
+ piece = self._pieces[piecenum]
416
+
417
+ # If we've got another piece open, close it first.
418
+ if self._open_piece is not piece:
419
+ if self._open_piece is not None:
420
+ self._open_piece.close()
421
+ self._open_piece = piece
422
+
423
+ # Get everything we can from this piece.
424
+ yield from piece.iterate_from(max(0, start_tok - offset))
425
+
426
+ # Update the offset table.
427
+ if piecenum + 1 == len(self._offsets):
428
+ self._offsets.append(self._offsets[-1] + len(piece))
429
+
430
+ # Move on to the next piece.
431
+ piecenum += 1
432
+
433
+
434
+ def concat(docs):
435
+ """
436
+ Concatenate together the contents of multiple documents from a
437
+ single corpus, using an appropriate concatenation function. This
438
+ utility function is used by corpus readers when the user requests
439
+ more than one document at a time.
440
+ """
441
+ if len(docs) == 1:
442
+ return docs[0]
443
+ if len(docs) == 0:
444
+ raise ValueError("concat() expects at least one object!")
445
+
446
+ types = {d.__class__ for d in docs}
447
+
448
+ # If they're all strings, use string concatenation.
449
+ if all(isinstance(doc, str) for doc in docs):
450
+ return "".join(docs)
451
+
452
+ # If they're all corpus views, then use ConcatenatedCorpusView.
453
+ for typ in types:
454
+ if not issubclass(typ, (StreamBackedCorpusView, ConcatenatedCorpusView)):
455
+ break
456
+ else:
457
+ return ConcatenatedCorpusView(docs)
458
+
459
+ # If they're all lazy sequences, use a lazy concatenation
460
+ for typ in types:
461
+ if not issubclass(typ, AbstractLazySequence):
462
+ break
463
+ else:
464
+ return LazyConcatenation(docs)
465
+
466
+ # Otherwise, see what we can do:
467
+ if len(types) == 1:
468
+ typ = list(types)[0]
469
+
470
+ if issubclass(typ, list):
471
+ return reduce((lambda a, b: a + b), docs, [])
472
+
473
+ if issubclass(typ, tuple):
474
+ return reduce((lambda a, b: a + b), docs, ())
475
+
476
+ if ElementTree.iselement(typ):
477
+ xmltree = ElementTree.Element("documents")
478
+ for doc in docs:
479
+ xmltree.append(doc)
480
+ return xmltree
481
+
482
+ # No method found!
483
+ raise ValueError("Don't know how to concatenate types: %r" % types)
484
+
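A tiny sketch of concat()'s dispatch behaviour, following the code above:

    from nltk.corpus.reader.util import concat

    print(concat(["ab", "cd"]))    # 'abcd'    -- all strings
    print(concat([[1, 2], [3]]))   # [1, 2, 3] -- all lists
    # A list of corpus views would instead produce a ConcatenatedCorpusView.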
485
+
486
+ ######################################################################
487
+ # { Corpus View for Pickled Sequences
488
+ ######################################################################
489
+
490
+
491
+ class PickleCorpusView(StreamBackedCorpusView):
492
+ """
493
+ A stream backed corpus view for corpus files that consist of
494
+ sequences of serialized Python objects (serialized using
495
+ ``pickle.dump``). One use case for this class is to store the
496
+ result of running feature detection on a corpus to disk. This can
497
+ be useful when performing feature detection is expensive (so we
498
+ don't want to repeat it); but the corpus is too large to store in
499
+ memory. The following example illustrates this technique:
500
+
501
+ >>> from nltk.corpus.reader.util import PickleCorpusView
502
+ >>> from nltk.util import LazyMap
503
+ >>> feature_corpus = LazyMap(detect_features, corpus) # doctest: +SKIP
504
+ >>> PickleCorpusView.write(feature_corpus, some_fileid) # doctest: +SKIP
505
+ >>> pcv = PickleCorpusView(some_fileid) # doctest: +SKIP
506
+ """
507
+
508
+ BLOCK_SIZE = 100
509
+ PROTOCOL = -1
510
+
511
+ def __init__(self, fileid, delete_on_gc=False):
512
+ """
513
+ Create a new corpus view that reads the pickle corpus
514
+ ``fileid``.
515
+
516
+ :param delete_on_gc: If true, then ``fileid`` will be deleted
517
+ whenever this object gets garbage-collected.
518
+ """
519
+ self._delete_on_gc = delete_on_gc
520
+ StreamBackedCorpusView.__init__(self, fileid)
521
+
522
+ def read_block(self, stream):
523
+ result = []
524
+ for i in range(self.BLOCK_SIZE):
525
+ try:
526
+ result.append(pickle.load(stream))
527
+ except EOFError:
528
+ break
529
+ return result
530
+
531
+ def __del__(self):
532
+ """
533
+ If ``delete_on_gc`` was set to true when this
534
+ ``PickleCorpusView`` was created, then delete the corpus view's
535
+ fileid. (This method is called whenever a
536
+ ``PickledCorpusView`` is garbage-collected.
537
+ """
538
+ if getattr(self, "_delete_on_gc"):
539
+ if os.path.exists(self._fileid):
540
+ try:
541
+ os.remove(self._fileid)
542
+ except OSError:
543
+ pass
544
+ self.__dict__.clear() # make the garbage collector's job easier
545
+
546
+ @classmethod
547
+ def write(cls, sequence, output_file):
548
+ if isinstance(output_file, str):
549
+ output_file = open(output_file, "wb")
550
+ for item in sequence:
551
+ pickle.dump(item, output_file, cls.PROTOCOL)
552
+
553
+ @classmethod
554
+ def cache_to_tempfile(cls, sequence, delete_on_gc=True):
555
+ """
556
+ Write the given sequence to a temporary file as a pickle
557
+ corpus; and then return a ``PickleCorpusView`` view for that
558
+ temporary corpus file.
559
+
560
+ :param delete_on_gc: If true, then the temporary file will be
561
+ deleted whenever this object gets garbage-collected.
562
+ """
563
+ try:
564
+ fd, output_file_name = tempfile.mkstemp(".pcv", "nltk-")
565
+ output_file = os.fdopen(fd, "wb")
566
+ cls.write(sequence, output_file)
567
+ output_file.close()
568
+ return PickleCorpusView(output_file_name, delete_on_gc)
569
+ except OSError as e:
570
+ raise ValueError("Error while creating temp file: %s" % e) from e
571
+
572
+
573
+ ######################################################################
574
+ # { Block Readers
575
+ ######################################################################
576
+
577
+
578
+ def read_whitespace_block(stream):
579
+ toks = []
580
+ for i in range(20): # Read 20 lines at a time.
581
+ toks.extend(stream.readline().split())
582
+ return toks
583
+
584
+
585
+ def read_wordpunct_block(stream):
586
+ toks = []
587
+ for i in range(20): # Read 20 lines at a time.
588
+ toks.extend(wordpunct_tokenize(stream.readline()))
589
+ return toks
590
+
591
+
592
+ def read_line_block(stream):
593
+ toks = []
594
+ for i in range(20):
595
+ line = stream.readline()
596
+ if not line:
597
+ return toks
598
+ toks.append(line.rstrip("\n"))
599
+ return toks
600
+
601
+
602
+ def read_blankline_block(stream):
603
+ s = ""
604
+ while True:
605
+ line = stream.readline()
606
+ # End of file:
607
+ if not line:
608
+ if s:
609
+ return [s]
610
+ else:
611
+ return []
612
+ # Blank line:
613
+ elif line and not line.strip():
614
+ if s:
615
+ return [s]
616
+ # Other line:
617
+ else:
618
+ s += line
619
+
620
+
621
+ def read_alignedsent_block(stream):
622
+ s = ""
623
+ while True:
624
+ line = stream.readline()
625
+ if line.startswith("=") or line == "\n" or line[:2] == "\r\n":
626
+ continue
627
+ # End of file:
628
+ if not line:
629
+ if s:
630
+ return [s]
631
+ else:
632
+ return []
633
+ # Other line:
634
+ else:
635
+ s += line
636
+ if re.match(r"^\d+-\d+", line) is not None:
637
+ return [s]
638
+
639
+
640
+ def read_regexp_block(stream, start_re, end_re=None):
641
+ """
642
+ Read a sequence of tokens from a stream, where tokens begin with
643
+ lines that match ``start_re``. If ``end_re`` is specified, then
644
+ tokens end with lines that match ``end_re``; otherwise, tokens end
645
+ whenever the next line matching ``start_re`` or EOF is found.
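+
+ A minimal illustrative sketch (skipped doctest; the ``.START`` marker
+ and in-memory stream are assumptions, not taken from any corpus):
+
+ >>> from io import StringIO # doctest: +SKIP
+ >>> text = ".START a" + chr(10) + "body" + chr(10) + ".START b" + chr(10) # doctest: +SKIP
+ >>> read_regexp_block(StringIO(text), start_re="[.]START")[0].splitlines() # doctest: +SKIP
+ ['.START a', 'body']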
646
+ """
647
+ # Scan until we find a line matching the start regexp.
648
+ while True:
649
+ line = stream.readline()
650
+ if not line:
651
+ return [] # end of file.
652
+ if re.match(start_re, line):
653
+ break
654
+
655
+ # Scan until we find another line matching the regexp, or EOF.
656
+ lines = [line]
657
+ while True:
658
+ oldpos = stream.tell()
659
+ line = stream.readline()
660
+ # End of file:
661
+ if not line:
662
+ return ["".join(lines)]
663
+ # End of token:
664
+ if end_re is not None and re.match(end_re, line):
665
+ return ["".join(lines)]
666
+ # Start of new token: backup to just before it starts, and
667
+ # return the token we've already collected.
668
+ if end_re is None and re.match(start_re, line):
669
+ stream.seek(oldpos)
670
+ return ["".join(lines)]
671
+ # Anything else is part of the token.
672
+ lines.append(line)
673
+
674
+
675
+ def read_sexpr_block(stream, block_size=16384, comment_char=None):
676
+ """
677
+ Read a sequence of s-expressions from the stream, and leave the
678
+ stream's file position at the end of the last complete s-expression
679
+ read. This function will always return at least one s-expression,
680
+ unless there are no more s-expressions in the file.
681
+
682
+ If the file ends in the middle of an s-expression, then that
683
+ incomplete s-expression is returned when the end of the file is
684
+ reached.
685
+
686
+ :param block_size: The default block size for reading. If an
687
+ s-expression is longer than one block, then more than one
688
+ block will be read.
689
+ :param comment_char: A character that marks comments. Any lines
690
+ that begin with this character will be stripped out.
691
+ (If spaces or tabs precede the comment character, then the
692
+ line will not be stripped.)
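+
+ A small illustrative sketch (skipped doctest; uses an in-memory stream
+ rather than a corpus file):
+
+ >>> from io import StringIO # doctest: +SKIP
+ >>> read_sexpr_block(StringIO("(a (b c)) (d e)")) # doctest: +SKIP
+ ['(a (b c))', '(d e)']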
693
+ """
694
+ start = stream.tell()
695
+ block = stream.read(block_size)
696
+ encoding = getattr(stream, "encoding", None)
697
+ assert encoding is not None or isinstance(block, str)
698
+ if encoding not in (None, "utf-8"):
699
+ import warnings
700
+
701
+ warnings.warn(
702
+ "Parsing may fail, depending on the properties "
703
+ "of the %s encoding!" % encoding
704
+ )
705
+ # (e.g., the utf-16 encoding does not work because it insists
706
+ # on adding BOMs to the beginning of encoded strings.)
707
+
708
+ if comment_char:
709
+ COMMENT = re.compile("(?m)^%s.*$" % re.escape(comment_char))
710
+ while True:
711
+ try:
712
+ # If we're stripping comments, then make sure our block ends
713
+ # on a line boundary; and then replace any comments with
714
+ # space characters. (We can't just strip them out -- that
715
+ # would make our offset wrong.)
716
+ if comment_char:
717
+ block += stream.readline()
718
+ block = re.sub(COMMENT, _sub_space, block)
719
+ # Read the block.
720
+ tokens, offset = _parse_sexpr_block(block)
721
+ # Skip whitespace
722
+ offset = re.compile(r"\s*").search(block, offset).end()
723
+
724
+ # Move to the end position.
725
+ if encoding is None:
726
+ stream.seek(start + offset)
727
+ else:
728
+ stream.seek(start + len(block[:offset].encode(encoding)))
729
+
730
+ # Return the list of tokens we processed
731
+ return tokens
732
+ except ValueError as e:
733
+ if e.args[0] == "Block too small":
734
+ next_block = stream.read(block_size)
735
+ if next_block:
736
+ block += next_block
737
+ continue
738
+ else:
739
+ # The file ended mid-sexpr -- return what we got.
740
+ return [block.strip()]
741
+ else:
742
+ raise
743
+
744
+
745
+ def _sub_space(m):
746
+ """Helper function: given a regexp match, return a string of
747
+ spaces that's the same length as the matched string."""
748
+ return " " * (m.end() - m.start())
749
+
750
+
751
+ def _parse_sexpr_block(block):
752
+ tokens = []
753
+ start = end = 0
754
+
755
+ while end < len(block):
756
+ m = re.compile(r"\S").search(block, end)
757
+ if not m:
758
+ return tokens, end
759
+
760
+ start = m.start()
761
+
762
+ # Case 1: sexpr is not parenthesized.
763
+ if m.group() != "(":
764
+ m2 = re.compile(r"[\s(]").search(block, start)
765
+ if m2:
766
+ end = m2.start()
767
+ else:
768
+ if tokens:
769
+ return tokens, end
770
+ raise ValueError("Block too small")
771
+
772
+ # Case 2: parenthesized sexpr.
773
+ else:
774
+ nesting = 0
775
+ for m in re.compile(r"[()]").finditer(block, start):
776
+ if m.group() == "(":
777
+ nesting += 1
778
+ else:
779
+ nesting -= 1
780
+ if nesting == 0:
781
+ end = m.end()
782
+ break
783
+ else:
784
+ if tokens:
785
+ return tokens, end
786
+ raise ValueError("Block too small")
787
+
788
+ tokens.append(block[start:end])
789
+
790
+ return tokens, end
791
+
792
+
793
+ ######################################################################
794
+ # { Finding Corpus Items
795
+ ######################################################################
796
+
797
+
798
+ def find_corpus_fileids(root, regexp):
799
+ if not isinstance(root, PathPointer):
800
+ raise TypeError("find_corpus_fileids: expected a PathPointer")
801
+ regexp += "$"
802
+
803
+ # Find fileids in a zipfile: scan the zipfile's namelist. Filter
804
+ # out entries that end in '/' -- they're directories.
805
+ if isinstance(root, ZipFilePathPointer):
806
+ fileids = [
807
+ name[len(root.entry) :]
808
+ for name in root.zipfile.namelist()
809
+ if not name.endswith("/")
810
+ ]
811
+ items = [name for name in fileids if re.match(regexp, name)]
812
+ return sorted(items)
813
+
814
+ # Find fileids in a directory: use os.walk to search all (proper
815
+ # or symlinked) subdirectories, and match paths against the regexp.
816
+ elif isinstance(root, FileSystemPathPointer):
817
+ items = []
818
+ for dirname, subdirs, fileids in os.walk(root.path):
819
+ prefix = "".join("%s/" % p for p in _path_from(root.path, dirname))
820
+ items += [
821
+ prefix + fileid
822
+ for fileid in fileids
823
+ if re.match(regexp, prefix + fileid)
824
+ ]
825
+ # Don't visit svn directories:
826
+ if ".svn" in subdirs:
827
+ subdirs.remove(".svn")
828
+ return sorted(items)
829
+
830
+ else:
831
+ raise AssertionError("Don't know how to handle %r" % root)
832
+
833
+
834
+ def _path_from(parent, child):
835
+ if os.path.split(parent)[1] == "":
836
+ parent = os.path.split(parent)[0]
837
+ path = []
838
+ while parent != child:
839
+ child, dirname = os.path.split(child)
840
+ path.insert(0, dirname)
841
+ assert os.path.split(child)[0] != child
842
+ return path
843
+
844
+
845
+ ######################################################################
846
+ # { Paragraph structure in Treebank files
847
+ ######################################################################
848
+
849
+
850
+ def tagged_treebank_para_block_reader(stream):
851
+ # Read the next paragraph.
852
+ para = ""
853
+ while True:
854
+ line = stream.readline()
855
+ # End of paragraph:
856
+ if re.match(r"======+\s*$", line):
857
+ if para.strip():
858
+ return [para]
859
+ # End of file:
860
+ elif line == "":
861
+ if para.strip():
862
+ return [para]
863
+ else:
864
+ return []
865
+ # Content line:
866
+ else:
867
+ para += line
lib/python3.10/site-packages/nltk/corpus/reader/verbnet.py ADDED
@@ -0,0 +1,629 @@
1
+ # Natural Language Toolkit: Verbnet Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ An NLTK interface to the VerbNet verb lexicon
10
+
11
+ For details about VerbNet see:
12
+ https://verbs.colorado.edu/~mpalmer/projects/verbnet.html
13
+ """
14
+
15
+ import re
16
+ import textwrap
17
+ from collections import defaultdict
18
+
19
+ from nltk.corpus.reader.xmldocs import XMLCorpusReader
20
+
21
+
22
+ class VerbnetCorpusReader(XMLCorpusReader):
23
+ """
24
+ An NLTK interface to the VerbNet verb lexicon.
25
+
26
+ From the VerbNet site: "VerbNet (VN) (Kipper-Schuler 2006) is the largest
27
+ on-line verb lexicon currently available for English. It is a hierarchical
28
+ domain-independent, broad-coverage verb lexicon with mappings to other
29
+ lexical resources such as WordNet (Miller, 1990; Fellbaum, 1998), XTAG
30
+ (XTAG Research Group, 2001), and FrameNet (Baker et al., 1998)."
31
+
32
+ For details about VerbNet see:
33
+ https://verbs.colorado.edu/~mpalmer/projects/verbnet.html
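+
+ A short illustrative sketch (skipped doctest; assumes the ``verbnet``
+ corpus has been downloaded, and the exact classes returned depend on
+ the VerbNet version):
+
+ >>> from nltk.corpus import verbnet # doctest: +SKIP
+ >>> verbnet.classids(lemma='give') # doctest: +SKIP
+ ['future_having-13.3']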
34
+ """
35
+
36
+ # No unicode encoding param, since the data files are all XML.
37
+ def __init__(self, root, fileids, wrap_etree=False):
38
+ XMLCorpusReader.__init__(self, root, fileids, wrap_etree)
39
+
40
+ self._lemma_to_class = defaultdict(list)
41
+ """A dictionary mapping from verb lemma strings to lists of
42
+ VerbNet class identifiers."""
43
+
44
+ self._wordnet_to_class = defaultdict(list)
45
+ """A dictionary mapping from wordnet identifier strings to
46
+ lists of VerbNet class identifiers."""
47
+
48
+ self._class_to_fileid = {}
49
+ """A dictionary mapping from class identifiers to
50
+ corresponding file identifiers. The keys of this dictionary
51
+ provide a complete list of all classes and subclasses."""
52
+
53
+ self._shortid_to_longid = {}
54
+
55
+ # Initialize the dictionaries. Use the quick (regexp-based)
56
+ # method instead of the slow (xml-based) method, because it
57
+ # runs 2-30 times faster.
58
+ self._quick_index()
59
+
60
+ _LONGID_RE = re.compile(r"([^\-\.]*)-([\d+.\-]+)$")
61
+ """Regular expression that matches (and decomposes) longids"""
62
+
63
+ _SHORTID_RE = re.compile(r"[\d+.\-]+$")
64
+ """Regular expression that matches shortids"""
65
+
66
+ _INDEX_RE = re.compile(
67
+ r'<MEMBER name="\??([^"]+)" wn="([^"]*)"[^>]+>|' r'<VNSUBCLASS ID="([^"]+)"/?>'
68
+ )
69
+ """Regular expression used by ``_index()`` to quickly scan the corpus
70
+ for basic information."""
71
+
72
+ def lemmas(self, vnclass=None):
73
+ """
74
+ Return a list of all verb lemmas that appear in any class, or
75
+ in ``vnclass`` if specified.
76
+ """
77
+ if vnclass is None:
78
+ return sorted(self._lemma_to_class.keys())
79
+ else:
80
+ # [xx] should this include subclass members?
81
+ if isinstance(vnclass, str):
82
+ vnclass = self.vnclass(vnclass)
83
+ return [member.get("name") for member in vnclass.findall("MEMBERS/MEMBER")]
84
+
85
+ def wordnetids(self, vnclass=None):
86
+ """
87
+ Return a list of all wordnet identifiers that appear in any
88
+ class, or in ``vnclass`` if specified.
89
+ """
90
+ if vnclass is None:
91
+ return sorted(self._wordnet_to_class.keys())
92
+ else:
93
+ # [xx] should this include subclass members?
94
+ if isinstance(vnclass, str):
95
+ vnclass = self.vnclass(vnclass)
96
+ return sum(
97
+ (
98
+ member.get("wn", "").split()
99
+ for member in vnclass.findall("MEMBERS/MEMBER")
100
+ ),
101
+ [],
102
+ )
103
+
104
+ def classids(self, lemma=None, wordnetid=None, fileid=None, classid=None):
105
+ """
106
+ Return a list of the VerbNet class identifiers. If a file
107
+ identifier is specified, then return only the VerbNet class
108
+ identifiers for classes (and subclasses) defined by that file.
109
+ If a lemma is specified, then return only VerbNet class
110
+ identifiers for classes that contain that lemma as a member.
111
+ If a wordnetid is specified, then return only identifiers for
112
+ classes that contain that wordnetid as a member. If a classid
113
+ is specified, then return only identifiers for subclasses of
114
+ the specified VerbNet class.
115
+ If nothing is specified, return all classids within VerbNet
116
+ """
117
+ if fileid is not None:
118
+ return [c for (c, f) in self._class_to_fileid.items() if f == fileid]
119
+ elif lemma is not None:
120
+ return self._lemma_to_class[lemma]
121
+ elif wordnetid is not None:
122
+ return self._wordnet_to_class[wordnetid]
123
+ elif classid is not None:
124
+ xmltree = self.vnclass(classid)
125
+ return [
126
+ subclass.get("ID")
127
+ for subclass in xmltree.findall("SUBCLASSES/VNSUBCLASS")
128
+ ]
129
+ else:
130
+ return sorted(self._class_to_fileid.keys())
131
+
132
+ def vnclass(self, fileid_or_classid):
133
+ """Returns VerbNet class ElementTree
134
+
135
+ Return an ElementTree containing the xml for the specified
136
+ VerbNet class.
137
+
138
+ :param fileid_or_classid: An identifier specifying which class
139
+ should be returned. Can be a file identifier (such as
140
+ ``'put-9.1.xml'``), or a VerbNet class identifier (such as
141
+ ``'put-9.1'``) or a short VerbNet class identifier (such as
142
+ ``'9.1'``).
143
+ """
144
+ # File identifier: just return the xml.
145
+ if fileid_or_classid in self._fileids:
146
+ return self.xml(fileid_or_classid)
147
+
148
+ # Class identifier: get the xml, and find the right elt.
149
+ classid = self.longid(fileid_or_classid)
150
+ if classid in self._class_to_fileid:
151
+ fileid = self._class_to_fileid[self.longid(classid)]
152
+ tree = self.xml(fileid)
153
+ if classid == tree.get("ID"):
154
+ return tree
155
+ else:
156
+ for subclass in tree.findall(".//VNSUBCLASS"):
157
+ if classid == subclass.get("ID"):
158
+ return subclass
159
+ else:
160
+ assert False # we saw it during _index()!
161
+
162
+ else:
163
+ raise ValueError(f"Unknown identifier {fileid_or_classid}")
164
+
165
+ def fileids(self, vnclass_ids=None):
166
+ """
167
+ Return a list of fileids that make up this corpus. If
168
+ ``vnclass_ids`` is specified, then return the fileids that make
169
+ up the specified VerbNet class(es).
170
+ """
171
+ if vnclass_ids is None:
172
+ return self._fileids
173
+ elif isinstance(vnclass_ids, str):
174
+ return [self._class_to_fileid[self.longid(vnclass_ids)]]
175
+ else:
176
+ return [
177
+ self._class_to_fileid[self.longid(vnclass_id)]
178
+ for vnclass_id in vnclass_ids
179
+ ]
180
+
181
+ def frames(self, vnclass):
182
+ """Given a VerbNet class, this method returns VerbNet frames
183
+
184
+ The members returned are:
185
+ 1) Example
186
+ 2) Description
187
+ 3) Syntax
188
+ 4) Semantics
189
+
190
+ :param vnclass: A VerbNet class identifier; or an ElementTree
191
+ containing the xml contents of a VerbNet class.
192
+ :return: frames - a list of frame dictionaries
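+
+ Illustrative sketch (skipped doctest; assumes the verbnet corpus is
+ installed; the keys follow directly from the dictionary built below):
+
+ >>> from nltk.corpus import verbnet # doctest: +SKIP
+ >>> frame = verbnet.frames('put-9.1')[0] # doctest: +SKIP
+ >>> sorted(frame.keys()) # doctest: +SKIP
+ ['description', 'example', 'semantics', 'syntax']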
193
+ """
194
+ if isinstance(vnclass, str):
195
+ vnclass = self.vnclass(vnclass)
196
+ frames = []
197
+ vnframes = vnclass.findall("FRAMES/FRAME")
198
+ for vnframe in vnframes:
199
+ frames.append(
200
+ {
201
+ "example": self._get_example_within_frame(vnframe),
202
+ "description": self._get_description_within_frame(vnframe),
203
+ "syntax": self._get_syntactic_list_within_frame(vnframe),
204
+ "semantics": self._get_semantics_within_frame(vnframe),
205
+ }
206
+ )
207
+ return frames
208
+
209
+ def subclasses(self, vnclass):
210
+ """Returns subclass ids, if any exist
211
+
212
+ Given a VerbNet class, this method returns subclass ids (if they exist)
213
+ in a list of strings.
214
+
215
+ :param vnclass: A VerbNet class identifier; or an ElementTree
216
+ containing the xml contents of a VerbNet class.
217
+ :return: list of subclasses
218
+ """
219
+ if isinstance(vnclass, str):
220
+ vnclass = self.vnclass(vnclass)
221
+
222
+ subclasses = [
223
+ subclass.get("ID") for subclass in vnclass.findall("SUBCLASSES/VNSUBCLASS")
224
+ ]
225
+ return subclasses
226
+
227
+ def themroles(self, vnclass):
228
+ """Returns thematic roles participating in a VerbNet class
229
+
230
+ Members returned as part of roles are-
231
+ 1) Type
232
+ 2) Modifiers
233
+
234
+ :param vnclass: A VerbNet class identifier; or an ElementTree
235
+ containing the xml contents of a VerbNet class.
236
+ :return: themroles: A list of thematic roles in the VerbNet class
237
+ """
238
+ if isinstance(vnclass, str):
239
+ vnclass = self.vnclass(vnclass)
240
+
241
+ themroles = []
242
+ for trole in vnclass.findall("THEMROLES/THEMROLE"):
243
+ themroles.append(
244
+ {
245
+ "type": trole.get("type"),
246
+ "modifiers": [
247
+ {"value": restr.get("Value"), "type": restr.get("type")}
248
+ for restr in trole.findall("SELRESTRS/SELRESTR")
249
+ ],
250
+ }
251
+ )
252
+ return themroles
253
+
254
+ ######################################################################
255
+ # { Index Initialization
256
+ ######################################################################
257
+
258
+ def _index(self):
259
+ """
260
+ Initialize the indexes ``_lemma_to_class``,
261
+ ``_wordnet_to_class``, and ``_class_to_fileid`` by scanning
262
+ through the corpus fileids. This is fast if ElementTree
263
+ uses the C implementation (<0.1 secs), but quite slow (>10 secs)
264
+ if only the python implementation is available.
265
+ """
266
+ for fileid in self._fileids:
267
+ self._index_helper(self.xml(fileid), fileid)
268
+
269
+ def _index_helper(self, xmltree, fileid):
270
+ """Helper for ``_index()``"""
271
+ vnclass = xmltree.get("ID")
272
+ self._class_to_fileid[vnclass] = fileid
273
+ self._shortid_to_longid[self.shortid(vnclass)] = vnclass
274
+ for member in xmltree.findall("MEMBERS/MEMBER"):
275
+ self._lemma_to_class[member.get("name")].append(vnclass)
276
+ for wn in member.get("wn", "").split():
277
+ self._wordnet_to_class[wn].append(vnclass)
278
+ for subclass in xmltree.findall("SUBCLASSES/VNSUBCLASS"):
279
+ self._index_helper(subclass, fileid)
280
+
281
+ def _quick_index(self):
282
+ """
283
+ Initialize the indexes ``_lemma_to_class``,
284
+ ``_wordnet_to_class``, and ``_class_to_fileid`` by scanning
285
+ through the corpus fileids. This doesn't do proper xml parsing,
286
+ but is good enough to find everything in the standard VerbNet
287
+ corpus -- and it runs about 30 times faster than xml parsing
288
+ (with the python ElementTree; only 2-3 times faster
289
+ if ElementTree uses the C implementation).
290
+ """
291
+ # nb: if we got rid of wordnet_to_class, this would run 2-3
292
+ # times faster.
293
+ for fileid in self._fileids:
294
+ vnclass = fileid[:-4] # strip the '.xml'
295
+ self._class_to_fileid[vnclass] = fileid
296
+ self._shortid_to_longid[self.shortid(vnclass)] = vnclass
297
+ with self.open(fileid) as fp:
298
+ for m in self._INDEX_RE.finditer(fp.read()):
299
+ groups = m.groups()
300
+ if groups[0] is not None:
301
+ self._lemma_to_class[groups[0]].append(vnclass)
302
+ for wn in groups[1].split():
303
+ self._wordnet_to_class[wn].append(vnclass)
304
+ elif groups[2] is not None:
305
+ self._class_to_fileid[groups[2]] = fileid
306
+ vnclass = groups[2] # for <MEMBER> elts.
307
+ self._shortid_to_longid[self.shortid(vnclass)] = vnclass
308
+ else:
309
+ assert False, "unexpected match condition"
310
+
311
+ ######################################################################
312
+ # { Identifier conversion
313
+ ######################################################################
314
+
315
+ def longid(self, shortid):
316
+ """Returns longid of a VerbNet class
317
+
318
+ Given a short VerbNet class identifier (eg '37.10'), map it
319
+ to a long id (eg 'confess-37.10'). If ``shortid`` is already a
320
+ long id, then return it as-is"""
321
+ if self._LONGID_RE.match(shortid):
322
+ return shortid # it's already a longid.
323
+ elif not self._SHORTID_RE.match(shortid):
324
+ raise ValueError("vnclass identifier %r not found" % shortid)
325
+ try:
326
+ return self._shortid_to_longid[shortid]
327
+ except KeyError as e:
328
+ raise ValueError("vnclass identifier %r not found" % shortid) from e
329
+
330
+ def shortid(self, longid):
331
+ """Returns shortid of a VerbNet class
332
+
333
+ Given a long VerbNet class identifier (eg 'confess-37.10'),
334
+ map it to a short id (eg '37.10'). If ``longid`` is already a
335
+ short id, then return it as-is."""
336
+ if self._SHORTID_RE.match(longid):
337
+ return longid # it's already a shortid.
338
+ m = self._LONGID_RE.match(longid)
339
+ if m:
340
+ return m.group(2)
341
+ else:
342
+ raise ValueError("vnclass identifier %r not found" % longid)
343
+
344
+ ######################################################################
345
+ # { Frame access utility functions
346
+ ######################################################################
347
+
348
+ def _get_semantics_within_frame(self, vnframe):
349
+ """Returns semantics within a single frame
350
+
351
+ A utility function to retrieve semantics within a frame in VerbNet
352
+ Members of the semantics dictionary:
353
+ 1) Predicate value
354
+ 2) Arguments
355
+
356
+ :param vnframe: An ElementTree containing the xml contents of
357
+ a VerbNet frame.
358
+ :return: semantics: semantics dictionary
359
+ """
360
+ semantics_within_single_frame = []
361
+ for pred in vnframe.findall("SEMANTICS/PRED"):
362
+ arguments = [
363
+ {"type": arg.get("type"), "value": arg.get("value")}
364
+ for arg in pred.findall("ARGS/ARG")
365
+ ]
366
+ semantics_within_single_frame.append(
367
+ {
368
+ "predicate_value": pred.get("value"),
369
+ "arguments": arguments,
370
+ "negated": pred.get("bool") == "!",
371
+ }
372
+ )
373
+ return semantics_within_single_frame
374
+
375
+ def _get_example_within_frame(self, vnframe):
376
+ """Returns example within a frame
377
+
378
+ A utility function to retrieve an example within a frame in VerbNet.
379
+
380
+ :param vnframe: An ElementTree containing the xml contents of
381
+ a VerbNet frame.
382
+ :return: example_text: The example sentence for this particular frame
383
+ """
384
+ example_element = vnframe.find("EXAMPLES/EXAMPLE")
385
+ if example_element is not None:
386
+ example_text = example_element.text
387
+ else:
388
+ example_text = ""
389
+ return example_text
390
+
391
+ def _get_description_within_frame(self, vnframe):
392
+ """Returns member description within frame
393
+
394
+ A utility function to retrieve a description of participating members
395
+ within a frame in VerbNet.
396
+
397
+ :param vnframe: An ElementTree containing the xml contents of
398
+ a VerbNet frame.
399
+ :return: description: a description dictionary with members - primary and secondary
400
+ """
401
+ description_element = vnframe.find("DESCRIPTION")
402
+ return {
403
+ "primary": description_element.attrib["primary"],
404
+ "secondary": description_element.get("secondary", ""),
405
+ }
406
+
407
+ def _get_syntactic_list_within_frame(self, vnframe):
408
+ """Returns semantics within a frame
409
+
410
+ A utility function to retrieve the syntactic structure within a frame in VerbNet.
411
+ Members of the syntactic dictionary:
412
+ 1) POS Tag
413
+ 2) Modifiers
414
+
415
+ :param vnframe: An ElementTree containing the xml contents of
416
+ a VerbNet frame.
417
+ :return: syntax_within_single_frame
418
+ """
419
+ syntax_within_single_frame = []
420
+ for elt in vnframe.find("SYNTAX"):
421
+ pos_tag = elt.tag
422
+ modifiers = dict()
423
+ modifiers["value"] = elt.get("value") if "value" in elt.attrib else ""
424
+ modifiers["selrestrs"] = [
425
+ {"value": restr.get("Value"), "type": restr.get("type")}
426
+ for restr in elt.findall("SELRESTRS/SELRESTR")
427
+ ]
428
+ modifiers["synrestrs"] = [
429
+ {"value": restr.get("Value"), "type": restr.get("type")}
430
+ for restr in elt.findall("SYNRESTRS/SYNRESTR")
431
+ ]
432
+ syntax_within_single_frame.append(
433
+ {"pos_tag": pos_tag, "modifiers": modifiers}
434
+ )
435
+ return syntax_within_single_frame
436
+
437
+ ######################################################################
438
+ # { Pretty Printing
439
+ ######################################################################
440
+
441
+ def pprint(self, vnclass):
442
+ """Returns pretty printed version of a VerbNet class
443
+
444
+ Return a string containing a pretty-printed representation of
445
+ the given VerbNet class.
446
+
447
+ :param vnclass: A VerbNet class identifier; or an ElementTree
448
+ containing the xml contents of a VerbNet class.
449
+ """
450
+ if isinstance(vnclass, str):
451
+ vnclass = self.vnclass(vnclass)
452
+
453
+ s = vnclass.get("ID") + "\n"
454
+ s += self.pprint_subclasses(vnclass, indent=" ") + "\n"
455
+ s += self.pprint_members(vnclass, indent=" ") + "\n"
456
+ s += " Thematic roles:\n"
457
+ s += self.pprint_themroles(vnclass, indent=" ") + "\n"
458
+ s += " Frames:\n"
459
+ s += self.pprint_frames(vnclass, indent=" ")
460
+ return s
461
+
462
+ def pprint_subclasses(self, vnclass, indent=""):
463
+ """Returns pretty printed version of subclasses of VerbNet class
464
+
465
+ Return a string containing a pretty-printed representation of
466
+ the given VerbNet class's subclasses.
467
+
468
+ :param vnclass: A VerbNet class identifier; or an ElementTree
469
+ containing the xml contents of a VerbNet class.
470
+ """
471
+ if isinstance(vnclass, str):
472
+ vnclass = self.vnclass(vnclass)
473
+
474
+ subclasses = self.subclasses(vnclass)
475
+ if not subclasses:
476
+ subclasses = ["(none)"]
477
+ s = "Subclasses: " + " ".join(subclasses)
478
+ return textwrap.fill(
479
+ s, 70, initial_indent=indent, subsequent_indent=indent + " "
480
+ )
481
+
482
+ def pprint_members(self, vnclass, indent=""):
483
+ """Returns pretty printed version of members in a VerbNet class
484
+
485
+ Return a string containing a pretty-printed representation of
486
+ the given VerbNet class's member verbs.
487
+
488
+ :param vnclass: A VerbNet class identifier; or an ElementTree
489
+ containing the xml contents of a VerbNet class.
490
+ """
491
+ if isinstance(vnclass, str):
492
+ vnclass = self.vnclass(vnclass)
493
+
494
+ members = self.lemmas(vnclass)
495
+ if not members:
496
+ members = ["(none)"]
497
+ s = "Members: " + " ".join(members)
498
+ return textwrap.fill(
499
+ s, 70, initial_indent=indent, subsequent_indent=indent + " "
500
+ )
501
+
502
+ def pprint_themroles(self, vnclass, indent=""):
503
+ """Returns pretty printed version of thematic roles in a VerbNet class
504
+
505
+ Return a string containing a pretty-printed representation of
506
+ the given VerbNet class's thematic roles.
507
+
508
+ :param vnclass: A VerbNet class identifier; or an ElementTree
509
+ containing the xml contents of a VerbNet class.
510
+ """
511
+ if isinstance(vnclass, str):
512
+ vnclass = self.vnclass(vnclass)
513
+
514
+ pieces = []
515
+ for themrole in self.themroles(vnclass):
516
+ piece = indent + "* " + themrole.get("type")
517
+ modifiers = [
518
+ modifier["value"] + modifier["type"]
519
+ for modifier in themrole["modifiers"]
520
+ ]
521
+ if modifiers:
522
+ piece += "[{}]".format(" ".join(modifiers))
523
+ pieces.append(piece)
524
+ return "\n".join(pieces)
525
+
526
+ def pprint_frames(self, vnclass, indent=""):
527
+ """Returns pretty version of all frames in a VerbNet class
528
+
529
+ Return a string containing a pretty-printed representation of
530
+ the list of frames within the VerbNet class.
531
+
532
+ :param vnclass: A VerbNet class identifier; or an ElementTree
533
+ containing the xml contents of a VerbNet class.
534
+ """
535
+ if isinstance(vnclass, str):
536
+ vnclass = self.vnclass(vnclass)
537
+ pieces = []
538
+ for vnframe in self.frames(vnclass):
539
+ pieces.append(self._pprint_single_frame(vnframe, indent))
540
+ return "\n".join(pieces)
541
+
542
+ def _pprint_single_frame(self, vnframe, indent=""):
543
+ """Returns pretty printed version of a single frame in a VerbNet class
544
+
545
+ Returns a string containing a pretty-printed representation of
546
+ the given frame.
547
+
548
+ :param vnframe: An ElementTree containing the xml contents of
549
+ a VerbNet frame.
550
+ """
551
+ frame_string = self._pprint_description_within_frame(vnframe, indent) + "\n"
552
+ frame_string += self._pprint_example_within_frame(vnframe, indent + " ") + "\n"
553
+ frame_string += (
554
+ self._pprint_syntax_within_frame(vnframe, indent + " Syntax: ") + "\n"
555
+ )
556
+ frame_string += indent + " Semantics:\n"
557
+ frame_string += self._pprint_semantics_within_frame(vnframe, indent + " ")
558
+ return frame_string
559
+
560
+ def _pprint_example_within_frame(self, vnframe, indent=""):
561
+ """Returns pretty printed version of example within frame in a VerbNet class
562
+
563
+ Return a string containing a pretty-printed representation of
564
+ the given VerbNet frame example.
565
+
566
+ :param vnframe: An ElementTree containing the xml contents of
567
+ a Verbnet frame.
568
+ """
569
+ if vnframe["example"]:
570
+ return indent + " Example: " + vnframe["example"]
571
+
572
+ def _pprint_description_within_frame(self, vnframe, indent=""):
573
+ """Returns pretty printed version of a VerbNet frame description
574
+
575
+ Return a string containing a pretty-printed representation of
576
+ the given VerbNet frame description.
577
+
578
+ :param vnframe: An ElementTree containing the xml contents of
579
+ a VerbNet frame.
580
+ """
581
+ description = indent + vnframe["description"]["primary"]
582
+ if vnframe["description"]["secondary"]:
583
+ description += " ({})".format(vnframe["description"]["secondary"])
584
+ return description
585
+
586
+ def _pprint_syntax_within_frame(self, vnframe, indent=""):
587
+ """Returns pretty printed version of syntax within a frame in a VerbNet class
588
+
589
+ Return a string containing a pretty-printed representation of
590
+ the given VerbNet frame syntax.
591
+
592
+ :param vnframe: An ElementTree containing the xml contents of
593
+ a VerbNet frame.
594
+ """
595
+ pieces = []
596
+ for element in vnframe["syntax"]:
597
+ piece = element["pos_tag"]
598
+ modifier_list = []
599
+ if "value" in element["modifiers"] and element["modifiers"]["value"]:
600
+ modifier_list.append(element["modifiers"]["value"])
601
+ modifier_list += [
602
+ "{}{}".format(restr["value"], restr["type"])
603
+ for restr in (
604
+ element["modifiers"]["selrestrs"]
605
+ + element["modifiers"]["synrestrs"]
606
+ )
607
+ ]
608
+ if modifier_list:
609
+ piece += "[{}]".format(" ".join(modifier_list))
610
+ pieces.append(piece)
611
+
612
+ return indent + " ".join(pieces)
613
+
614
+ def _pprint_semantics_within_frame(self, vnframe, indent=""):
615
+ """Returns a pretty printed version of semantics within frame in a VerbNet class
616
+
617
+ Return a string containing a pretty-printed representation of
618
+ the given VerbNet frame semantics.
619
+
620
+ :param vnframe: An ElementTree containing the xml contents of
621
+ a VerbNet frame.
622
+ """
623
+ pieces = []
624
+ for predicate in vnframe["semantics"]:
625
+ arguments = [argument["value"] for argument in predicate["arguments"]]
626
+ pieces.append(
627
+ f"{'¬' if predicate['negated'] else ''}{predicate['predicate_value']}({', '.join(arguments)})"
628
+ )
629
+ return "\n".join(f"{indent}* {piece}" for piece in pieces)
lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py ADDED
@@ -0,0 +1,166 @@
1
+ # Natural Language Toolkit: Word List Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ from nltk.corpus.reader.api import *
9
+ from nltk.corpus.reader.util import *
10
+ from nltk.tokenize import line_tokenize
11
+
12
+
13
+ class WordListCorpusReader(CorpusReader):
14
+ """
15
+ List of words, one per line. Blank lines are ignored.
16
+ """
17
+
18
+ def words(self, fileids=None, ignore_lines_startswith="\n"):
19
+ return [
20
+ line
21
+ for line in line_tokenize(self.raw(fileids))
22
+ if not line.startswith(ignore_lines_startswith)
23
+ ]
24
+
25
+
26
+ class SwadeshCorpusReader(WordListCorpusReader):
27
+ def entries(self, fileids=None):
28
+ """
29
+ :return: a tuple of words for the specified fileids.
30
+ """
31
+ if not fileids:
32
+ fileids = self.fileids()
33
+
34
+ wordlists = [self.words(f) for f in fileids]
35
+ return list(zip(*wordlists))
36
+
37
+
38
+ class NonbreakingPrefixesCorpusReader(WordListCorpusReader):
39
+ """
40
+ This is a class to read the nonbreaking prefixes textfiles from the
41
+ Moses Machine Translation toolkit. These lists are used in the Python port
42
+ of the Moses' word tokenizer.
43
+ """
44
+
45
+ available_langs = {
46
+ "catalan": "ca",
47
+ "czech": "cs",
48
+ "german": "de",
49
+ "greek": "el",
50
+ "english": "en",
51
+ "spanish": "es",
52
+ "finnish": "fi",
53
+ "french": "fr",
54
+ "hungarian": "hu",
55
+ "icelandic": "is",
56
+ "italian": "it",
57
+ "latvian": "lv",
58
+ "dutch": "nl",
59
+ "polish": "pl",
60
+ "portuguese": "pt",
61
+ "romanian": "ro",
62
+ "russian": "ru",
63
+ "slovak": "sk",
64
+ "slovenian": "sl",
65
+ "swedish": "sv",
66
+ "tamil": "ta",
67
+ }
68
+ # Also, add the lang IDs as the keys.
69
+ available_langs.update({v: v for v in available_langs.values()})
70
+
71
+ def words(self, lang=None, fileids=None, ignore_lines_startswith="#"):
72
+ """
73
+ This module returns a list of nonbreaking prefixes for the specified
74
+ language(s).
75
+
76
+ >>> from nltk.corpus import nonbreaking_prefixes as nbp
77
+ >>> nbp.words('en')[:10] == [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'J']
78
+ True
79
+ >>> nbp.words('ta')[:5] == [u'\u0b85', u'\u0b86', u'\u0b87', u'\u0b88', u'\u0b89']
80
+ True
81
+
82
+ :return: a list words for the specified language(s).
83
+ """
84
+ # If *lang* in list of languages available, allocate apt fileid.
85
+ # Otherwise, the function returns non-breaking prefixes for
86
+ # all languages when fileids==None.
87
+ if lang in self.available_langs:
88
+ lang = self.available_langs[lang]
89
+ fileids = ["nonbreaking_prefix." + lang]
90
+ return [
91
+ line
92
+ for line in line_tokenize(self.raw(fileids))
93
+ if not line.startswith(ignore_lines_startswith)
94
+ ]
95
+
96
+
97
+ class UnicharsCorpusReader(WordListCorpusReader):
98
+ """
99
+ This class is used to read lists of characters from the Perl Unicode
100
+ Properties (see https://perldoc.perl.org/perluniprops.html).
101
+ The files in the perluniprop.zip are extracted using the Unicode::Tussle
102
+ module from https://search.cpan.org/~bdfoy/Unicode-Tussle-1.11/lib/Unicode/Tussle.pm
103
+ """
104
+
105
+ # These are categories similar to the Perl Unicode Properties
106
+ available_categories = [
107
+ "Close_Punctuation",
108
+ "Currency_Symbol",
109
+ "IsAlnum",
110
+ "IsAlpha",
111
+ "IsLower",
112
+ "IsN",
113
+ "IsSc",
114
+ "IsSo",
115
+ "IsUpper",
116
+ "Line_Separator",
117
+ "Number",
118
+ "Open_Punctuation",
119
+ "Punctuation",
120
+ "Separator",
121
+ "Symbol",
122
+ ]
123
+
124
+ def chars(self, category=None, fileids=None):
125
+ """
126
+ This module returns a list of characters from the Perl Unicode Properties.
127
+ They are very useful when porting Perl tokenizers to Python.
128
+
129
+ >>> from nltk.corpus import perluniprops as pup
130
+ >>> pup.chars('Open_Punctuation')[:5] == [u'(', u'[', u'{', u'\u0f3a', u'\u0f3c']
131
+ True
132
+ >>> pup.chars('Currency_Symbol')[:5] == [u'$', u'\xa2', u'\xa3', u'\xa4', u'\xa5']
133
+ True
134
+ >>> pup.available_categories
135
+ ['Close_Punctuation', 'Currency_Symbol', 'IsAlnum', 'IsAlpha', 'IsLower', 'IsN', 'IsSc', 'IsSo', 'IsUpper', 'Line_Separator', 'Number', 'Open_Punctuation', 'Punctuation', 'Separator', 'Symbol']
136
+
137
+ :return: a list of characters given the specific unicode character category
138
+ """
139
+ if category in self.available_categories:
140
+ fileids = [category + ".txt"]
141
+ return list(self.raw(fileids).strip())
142
+
143
+
144
+ class MWAPPDBCorpusReader(WordListCorpusReader):
145
+ """
146
+ This class is used to read the list of word pairs from the subset of lexical
147
+ pairs of The Paraphrase Database (PPDB) XXXL used in the Monolingual Word
148
+ Alignment (MWA) algorithm described in Sultan et al. (2014a, 2014b, 2015):
149
+
150
+ - http://acl2014.org/acl2014/Q14/pdf/Q14-1017
151
+ - https://www.aclweb.org/anthology/S14-2039
152
+ - https://www.aclweb.org/anthology/S15-2027
153
+
154
+ The original source of the full PPDB corpus can be found on
155
+ https://www.cis.upenn.edu/~ccb/ppdb/
156
+
157
+ :return: a list of tuples of similar lexical terms.
158
+ """
159
+
160
+ mwa_ppdb_xxxl_file = "ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs"
161
+
162
+ def entries(self, fileids=mwa_ppdb_xxxl_file):
163
+ """
164
+ :return: a tuple of synonym word pairs.
165
+ """
166
+ return [tuple(line.split("\t")) for line in line_tokenize(self.raw(fileids))]
lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py ADDED
@@ -0,0 +1,2489 @@
1
+ # Natural Language Toolkit: WordNet
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bethard <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Edward Loper <[email protected]>
7
+ # Nitin Madnani <[email protected]>
8
+ # Nasruddin A’aidil Shari
9
+ # Sim Wei Ying Geraldine
10
+ # Soe Lynn
11
+ # Francis Bond <[email protected]>
12
+ # Eric Kafe <[email protected]>
13
+
14
+ # URL: <https://www.nltk.org/>
15
+ # For license information, see LICENSE.TXT
16
+
17
+ """
18
+ An NLTK interface for WordNet
19
+
20
+ WordNet is a lexical database of English.
21
+ Using synsets, helps find conceptual relationships between words
22
+ such as hypernyms, hyponyms, synonyms, antonyms etc.
23
+
24
+ For details about WordNet see:
25
+ https://wordnet.princeton.edu/
26
+
27
+ This module also allows you to find lemmas in languages
28
+ other than English from the Open Multilingual Wordnet
29
+ https://omwn.org/
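+
+ A quick illustrative sketch (skipped doctest; assumes the wordnet
+ corpus has been downloaded):
+
+ >>> from nltk.corpus import wordnet as wn # doctest: +SKIP
+ >>> wn.synsets('dog')[0].name() # doctest: +SKIP
+ 'dog.n.01'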
30
+
31
+ """
32
+
33
+ import math
34
+ import os
35
+ import re
36
+ import warnings
37
+ from collections import defaultdict, deque
38
+ from functools import total_ordering
39
+ from itertools import chain, islice
40
+ from operator import itemgetter
41
+
42
+ from nltk.corpus.reader import CorpusReader
43
+ from nltk.internals import deprecated
44
+ from nltk.probability import FreqDist
45
+ from nltk.util import binary_search_file as _binary_search_file
46
+
47
+ ######################################################################
48
+ # Table of Contents
49
+ ######################################################################
50
+ # - Constants
51
+ # - Data Classes
52
+ # - WordNetError
53
+ # - Lemma
54
+ # - Synset
55
+ # - WordNet Corpus Reader
56
+ # - WordNet Information Content Corpus Reader
57
+ # - Similarity Metrics
58
+ # - Demo
59
+
60
+ ######################################################################
61
+ # Constants
62
+ ######################################################################
63
+
64
+ #: Positive infinity (for similarity functions)
65
+ _INF = 1e300
66
+
67
+ # { Part-of-speech constants
68
+ ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v"
69
+ # }
70
+
71
+ POS_LIST = [NOUN, VERB, ADJ, ADV]
72
+
73
+ # A table of strings that are used to express verb frames.
74
+ VERB_FRAME_STRINGS = (
75
+ None,
76
+ "Something %s",
77
+ "Somebody %s",
78
+ "It is %sing",
79
+ "Something is %sing PP",
80
+ "Something %s something Adjective/Noun",
81
+ "Something %s Adjective/Noun",
82
+ "Somebody %s Adjective",
83
+ "Somebody %s something",
84
+ "Somebody %s somebody",
85
+ "Something %s somebody",
86
+ "Something %s something",
87
+ "Something %s to somebody",
88
+ "Somebody %s on something",
89
+ "Somebody %s somebody something",
90
+ "Somebody %s something to somebody",
91
+ "Somebody %s something from somebody",
92
+ "Somebody %s somebody with something",
93
+ "Somebody %s somebody of something",
94
+ "Somebody %s something on somebody",
95
+ "Somebody %s somebody PP",
96
+ "Somebody %s something PP",
97
+ "Somebody %s PP",
98
+ "Somebody's (body part) %s",
99
+ "Somebody %s somebody to INFINITIVE",
100
+ "Somebody %s somebody INFINITIVE",
101
+ "Somebody %s that CLAUSE",
102
+ "Somebody %s to somebody",
103
+ "Somebody %s to INFINITIVE",
104
+ "Somebody %s whether INFINITIVE",
105
+ "Somebody %s somebody into V-ing something",
106
+ "Somebody %s something with something",
107
+ "Somebody %s INFINITIVE",
108
+ "Somebody %s VERB-ing",
109
+ "It %s that CLAUSE",
110
+ "Something %s INFINITIVE",
111
+ # OEWN additions:
112
+ "Somebody %s at something",
113
+ "Somebody %s for something",
114
+ "Somebody %s on somebody",
115
+ "Somebody %s out of somebody",
116
+ )
117
+
118
+ SENSENUM_RE = re.compile(r"\.[\d]+\.")
119
+
120
+
121
+ ######################################################################
122
+ # Data Classes
123
+ ######################################################################
124
+
125
+
126
+ class WordNetError(Exception):
127
+ """An exception class for wordnet-related errors."""
128
+
129
+
130
+ @total_ordering
131
+ class _WordNetObject:
132
+ """A common base class for lemmas and synsets."""
133
+
134
+ def hypernyms(self):
135
+ return self._related("@")
136
+
137
+ def _hypernyms(self):
138
+ return self._related("@")
139
+
140
+ def instance_hypernyms(self):
141
+ return self._related("@i")
142
+
143
+ def _instance_hypernyms(self):
144
+ return self._related("@i")
145
+
146
+ def hyponyms(self):
147
+ return self._related("~")
148
+
149
+ def instance_hyponyms(self):
150
+ return self._related("~i")
151
+
152
+ def member_holonyms(self):
153
+ return self._related("#m")
154
+
155
+ def substance_holonyms(self):
156
+ return self._related("#s")
157
+
158
+ def part_holonyms(self):
159
+ return self._related("#p")
160
+
161
+ def member_meronyms(self):
162
+ return self._related("%m")
163
+
164
+ def substance_meronyms(self):
165
+ return self._related("%s")
166
+
167
+ def part_meronyms(self):
168
+ return self._related("%p")
169
+
170
+ def topic_domains(self):
171
+ return self._related(";c")
172
+
173
+ def in_topic_domains(self):
174
+ return self._related("-c")
175
+
176
+ def region_domains(self):
177
+ return self._related(";r")
178
+
179
+ def in_region_domains(self):
180
+ return self._related("-r")
181
+
182
+ def usage_domains(self):
183
+ return self._related(";u")
184
+
185
+ def in_usage_domains(self):
186
+ return self._related("-u")
187
+
188
+ def attributes(self):
189
+ return self._related("=")
190
+
191
+ def entailments(self):
192
+ return self._related("*")
193
+
194
+ def causes(self):
195
+ return self._related(">")
196
+
197
+ def also_sees(self):
198
+ return self._related("^")
199
+
200
+ def verb_groups(self):
201
+ return self._related("$")
202
+
203
+ def similar_tos(self):
204
+ return self._related("&")
205
+
206
+ def __hash__(self):
207
+ return hash(self._name)
208
+
209
+ def __eq__(self, other):
210
+ return self._name == other._name
211
+
212
+ def __ne__(self, other):
213
+ return self._name != other._name
214
+
215
+ def __lt__(self, other):
216
+ return self._name < other._name
217
+
218
+
219
+ class Lemma(_WordNetObject):
220
+ """
221
+ The lexical entry for a single morphological form of a
222
+ sense-disambiguated word.
223
+
224
+ Create a Lemma from a "<word>.<pos>.<number>.<lemma>" string where:
225
+ <word> is the morphological stem identifying the synset
226
+ <pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
227
+ <number> is the sense number, counting from 0.
228
+ <lemma> is the morphological form of interest
229
+
230
+ Note that <word> and <lemma> can be different, e.g. the Synset
231
+ 'salt.n.03' has the Lemmas 'salt.n.03.salt', 'salt.n.03.saltiness' and
232
+ 'salt.n.03.salinity'.
233
+
234
+ Lemma attributes, accessible via methods with the same name:
235
+
236
+ - name: The canonical name of this lemma.
237
+ - synset: The synset that this lemma belongs to.
238
+ - syntactic_marker: For adjectives, the WordNet string identifying the
239
+ syntactic position relative modified noun. See:
240
+ https://wordnet.princeton.edu/documentation/wninput5wn
241
+ For all other parts of speech, this attribute is None.
242
+ - count: The frequency of this lemma in wordnet.
243
+
244
+ Lemma methods:
245
+
246
+ Lemmas have the following methods for retrieving related Lemmas. They
247
+ correspond to the names for the pointer symbols defined here:
248
+ https://wordnet.princeton.edu/documentation/wninput5wn
249
+ These methods all return lists of Lemmas:
250
+
251
+ - antonyms
252
+ - hypernyms, instance_hypernyms
253
+ - hyponyms, instance_hyponyms
254
+ - member_holonyms, substance_holonyms, part_holonyms
255
+ - member_meronyms, substance_meronyms, part_meronyms
256
+ - topic_domains, region_domains, usage_domains
257
+ - attributes
258
+ - derivationally_related_forms
259
+ - entailments
260
+ - causes
261
+ - also_sees
262
+ - verb_groups
263
+ - similar_tos
264
+ - pertainyms
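+
+ A brief illustrative sketch (skipped doctest; assumes the wordnet
+ corpus has been downloaded; the lemma name comes from the example
+ above):
+
+ >>> from nltk.corpus import wordnet as wn # doctest: +SKIP
+ >>> wn.lemma('salt.n.03.salinity').synset() # doctest: +SKIP
+ Synset('salt.n.03')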
265
+ """
266
+
267
+ __slots__ = [
268
+ "_wordnet_corpus_reader",
269
+ "_name",
270
+ "_syntactic_marker",
271
+ "_synset",
272
+ "_frame_strings",
273
+ "_frame_ids",
274
+ "_lexname_index",
275
+ "_lex_id",
276
+ "_lang",
277
+ "_key",
278
+ ]
279
+
280
+ def __init__(
281
+ self,
282
+ wordnet_corpus_reader,
283
+ synset,
284
+ name,
285
+ lexname_index,
286
+ lex_id,
287
+ syntactic_marker,
288
+ ):
289
+ self._wordnet_corpus_reader = wordnet_corpus_reader
290
+ self._name = name
291
+ self._syntactic_marker = syntactic_marker
292
+ self._synset = synset
293
+ self._frame_strings = []
294
+ self._frame_ids = []
295
+ self._lexname_index = lexname_index
296
+ self._lex_id = lex_id
297
+ self._lang = "eng"
298
+
299
+ self._key = None # gets set later.
300
+
301
+ def name(self):
302
+ return self._name
303
+
304
+ def syntactic_marker(self):
305
+ return self._syntactic_marker
306
+
307
+ def synset(self):
308
+ return self._synset
309
+
310
+ def frame_strings(self):
311
+ return self._frame_strings
312
+
313
+ def frame_ids(self):
314
+ return self._frame_ids
315
+
316
+ def lang(self):
317
+ return self._lang
318
+
319
+ def key(self):
320
+ return self._key
321
+
322
+ def __repr__(self):
323
+ tup = type(self).__name__, self._synset._name, self._name
324
+ return "%s('%s.%s')" % tup
325
+
326
+ def _related(self, relation_symbol):
327
+ get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset
328
+ if (self._name, relation_symbol) not in self._synset._lemma_pointers:
329
+ return []
330
+ return [
331
+ get_synset(pos, offset)._lemmas[lemma_index]
332
+ for pos, offset, lemma_index in self._synset._lemma_pointers[
333
+ self._name, relation_symbol
334
+ ]
335
+ ]
336
+
337
+ def count(self):
338
+ """Return the frequency count for this Lemma"""
339
+ return self._wordnet_corpus_reader.lemma_count(self)
340
+
341
+ def antonyms(self):
342
+ return self._related("!")
343
+
344
+ def derivationally_related_forms(self):
345
+ return self._related("+")
346
+
347
+ def pertainyms(self):
348
+ return self._related("\\")
349
+
350
+
351
+ class Synset(_WordNetObject):
352
+ """Create a Synset from a "<lemma>.<pos>.<number>" string where:
353
+ <lemma> is the word's morphological stem
354
+ <pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
355
+ <number> is the sense number, counting from 0.
356
+
357
+ Synset attributes, accessible via methods with the same name:
358
+
359
+ - name: The canonical name of this synset, formed using the first lemma
360
+ of this synset. Note that this may be different from the name
361
+ passed to the constructor if that string used a different lemma to
362
+ identify the synset.
363
+ - pos: The synset's part of speech, matching one of the module level
364
+ attributes ADJ, ADJ_SAT, ADV, NOUN or VERB.
365
+ - lemmas: A list of the Lemma objects for this synset.
366
+ - definition: The definition for this synset.
367
+ - examples: A list of example strings for this synset.
368
+ - offset: The offset in the WordNet dict file of this synset.
369
+ - lexname: The name of the lexicographer file containing this synset.
370
+
371
+ Synset methods:
372
+
373
+ Synsets have the following methods for retrieving related Synsets.
374
+ They correspond to the names for the pointer symbols defined here:
375
+ https://wordnet.princeton.edu/documentation/wninput5wn
376
+ These methods all return lists of Synsets.
377
+
378
+ - hypernyms, instance_hypernyms
379
+ - hyponyms, instance_hyponyms
380
+ - member_holonyms, substance_holonyms, part_holonyms
381
+ - member_meronyms, substance_meronyms, part_meronyms
382
+ - attributes
383
+ - entailments
384
+ - causes
385
+ - also_sees
386
+ - verb_groups
387
+ - similar_tos
388
+
389
+ Additionally, Synsets support the following methods specific to the
390
+ hypernym relation:
391
+
392
+ - root_hypernyms
393
+ - common_hypernyms
394
+ - lowest_common_hypernyms
395
+
396
+ Note that Synsets do not support the following relations because
397
+ these are defined by WordNet as lexical relations:
398
+
399
+ - antonyms
400
+ - derivationally_related_forms
401
+ - pertainyms
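+
+ A brief illustrative sketch (skipped doctest; assumes the wordnet
+ corpus has been downloaded):
+
+ >>> from nltk.corpus import wordnet as wn # doctest: +SKIP
+ >>> wn.synset('dog.n.01').hypernyms() # doctest: +SKIP
+ [Synset('canine.n.02'), Synset('domestic_animal.n.01')]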
402
+ """
403
+
404
+ __slots__ = [
405
+ "_pos",
406
+ "_offset",
407
+ "_name",
408
+ "_frame_ids",
409
+ "_lemmas",
410
+ "_lemma_names",
411
+ "_definition",
412
+ "_examples",
413
+ "_lexname",
414
+ "_pointers",
415
+ "_lemma_pointers",
416
+ "_max_depth",
417
+ "_min_depth",
418
+ ]
419
+
420
+ def __init__(self, wordnet_corpus_reader):
421
+ self._wordnet_corpus_reader = wordnet_corpus_reader
422
+ # All of these attributes get initialized by
423
+ # WordNetCorpusReader._synset_from_pos_and_line()
424
+
425
+ self._pos = None
426
+ self._offset = None
427
+ self._name = None
428
+ self._frame_ids = []
429
+ self._lemmas = []
430
+ self._lemma_names = []
431
+ self._definition = None
432
+ self._examples = []
433
+ self._lexname = None # lexicographer name
434
+ self._all_hypernyms = None
435
+
436
+ self._pointers = defaultdict(set)
437
+ self._lemma_pointers = defaultdict(list)
438
+
439
+ def pos(self):
440
+ return self._pos
441
+
442
+ def offset(self):
443
+ return self._offset
444
+
445
+ def name(self):
446
+ return self._name
447
+
448
+ def frame_ids(self):
449
+ return self._frame_ids
450
+
451
+ def _doc(self, doc_type, default, lang="eng"):
452
+ """Helper method for Synset.definition and Synset.examples"""
453
+ corpus = self._wordnet_corpus_reader
454
+ if lang not in corpus.langs():
455
+ return None
456
+ elif lang == "eng":
457
+ return default
458
+ else:
459
+ corpus._load_lang_data(lang)
460
+ of = corpus.ss2of(self)
461
+ i = corpus.lg_attrs.index(doc_type)
462
+ if of in corpus._lang_data[lang][i]:
463
+ return corpus._lang_data[lang][i][of]
464
+ else:
465
+ return None
466
+
467
+ def definition(self, lang="eng"):
468
+ """Return definition in specified language"""
469
+ return self._doc("def", self._definition, lang=lang)
470
+
471
+ def examples(self, lang="eng"):
472
+ """Return examples in specified language"""
473
+ return self._doc("exe", self._examples, lang=lang)
474
+
475
+ def lexname(self):
476
+ return self._lexname
477
+
478
+ def _needs_root(self):
479
+ if self._pos == NOUN and self._wordnet_corpus_reader.get_version() != "1.6":
480
+ return False
481
+ else:
482
+ return True
483
+
484
+ def lemma_names(self, lang="eng"):
485
+ """Return all the lemma_names associated with the synset"""
486
+ if lang == "eng":
487
+ return self._lemma_names
488
+ else:
489
+ reader = self._wordnet_corpus_reader
490
+ reader._load_lang_data(lang)
491
+ i = reader.ss2of(self)
492
+ if i in reader._lang_data[lang][0]:
493
+ return reader._lang_data[lang][0][i]
494
+ else:
495
+ return []
496
+
497
+ def lemmas(self, lang="eng"):
498
+ """Return all the lemma objects associated with the synset"""
499
+ if lang == "eng":
500
+ return self._lemmas
501
+ elif self._name:
502
+ self._wordnet_corpus_reader._load_lang_data(lang)
503
+ lemmark = []
504
+ lemmy = self.lemma_names(lang)
505
+ for lem in lemmy:
506
+ temp = Lemma(
507
+ self._wordnet_corpus_reader,
508
+ self,
509
+ lem,
510
+ self._wordnet_corpus_reader._lexnames.index(self.lexname()),
511
+ 0,
512
+ None,
513
+ )
514
+ temp._lang = lang
515
+ lemmark.append(temp)
516
+ return lemmark
517
+
518
+ def root_hypernyms(self):
519
+ """Get the topmost hypernyms of this synset in WordNet."""
520
+
521
+ result = []
522
+ seen = set()
523
+ todo = [self]
524
+ while todo:
525
+ next_synset = todo.pop()
526
+ if next_synset not in seen:
527
+ seen.add(next_synset)
528
+ next_hypernyms = (
529
+ next_synset.hypernyms() + next_synset.instance_hypernyms()
530
+ )
531
+ if not next_hypernyms:
532
+ result.append(next_synset)
533
+ else:
534
+ todo.extend(next_hypernyms)
535
+ return result
536
+
537
+ # Simpler implementation which makes incorrect assumption that
538
+ # hypernym hierarchy is acyclic:
539
+ #
540
+ # if not self.hypernyms():
541
+ # return [self]
542
+ # else:
543
+ # return list(set(root for h in self.hypernyms()
544
+ # for root in h.root_hypernyms()))
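+ # Illustrative usage sketch (added here for clarity, not part of the NLTK
+ # source); assumes the standard WordNet 3.0 data, where nearly every noun
+ # synset bottoms out at entity.n.01:
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.synset('dog.n.01').root_hypernyms()
+ # [Synset('entity.n.01')]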
545
+ def max_depth(self):
546
+ """
547
+ :return: The length of the longest hypernym path from this
548
+ synset to the root.
549
+ """
550
+
551
+ if "_max_depth" not in self.__dict__:
552
+ hypernyms = self.hypernyms() + self.instance_hypernyms()
553
+ if not hypernyms:
554
+ self._max_depth = 0
555
+ else:
556
+ self._max_depth = 1 + max(h.max_depth() for h in hypernyms)
557
+ return self._max_depth
558
+
559
+ def min_depth(self):
560
+ """
561
+ :return: The length of the shortest hypernym path from this
562
+ synset to the root.
563
+ """
564
+
565
+ if "_min_depth" not in self.__dict__:
566
+ hypernyms = self.hypernyms() + self.instance_hypernyms()
567
+ if not hypernyms:
568
+ self._min_depth = 0
569
+ else:
570
+ self._min_depth = 1 + min(h.min_depth() for h in hypernyms)
571
+ return self._min_depth
572
+
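+ # Hedged illustration (not part of the NLTK source): depths count edges on
+ # hypernym paths, so a root synset has depth 0. The dog.n.01 values below
+ # assume WordNet 3.0, where the shortest path to the root has 8 edges and
+ # the longest has 13.
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.synset('entity.n.01').min_depth()
+ # 0
+ # >>> wn.synset('dog.n.01').min_depth()
+ # 8
+ # >>> wn.synset('dog.n.01').max_depth()
+ # 13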
573
+ def closure(self, rel, depth=-1):
574
+ """
575
+ Return the transitive closure of source under the rel
576
+ relationship, breadth-first, discarding cycles:
577
+
578
+ >>> from nltk.corpus import wordnet as wn
579
+ >>> computer = wn.synset('computer.n.01')
580
+ >>> topic = lambda s:s.topic_domains()
581
+ >>> print(list(computer.closure(topic)))
582
+ [Synset('computer_science.n.01')]
583
+
584
+ UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2
585
+
586
+
587
+ Include redundant paths (but only once), avoiding duplicate searches
588
+ (from 'animal.n.01' to 'entity.n.01'):
589
+
590
+ >>> dog = wn.synset('dog.n.01')
591
+ >>> hyp = lambda s:s.hypernyms()
592
+ >>> print(list(dog.closure(hyp)))
593
+ [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\
594
+ Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\
595
+ Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\
596
+ Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\
597
+ Synset('physical_entity.n.01'), Synset('entity.n.01')]
598
+
599
+ UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7
600
+ """
601
+
602
+ from nltk.util import acyclic_breadth_first
603
+
604
+ for synset in acyclic_breadth_first(self, rel, depth):
605
+ if synset != self:
606
+ yield synset
607
+
608
+ from nltk.util import acyclic_depth_first as acyclic_tree
609
+ from nltk.util import unweighted_minimum_spanning_tree as mst
610
+
611
+ # Also add this shortcut?
612
+ # from nltk.util import unweighted_minimum_spanning_digraph as umsd
613
+
614
+ def tree(self, rel, depth=-1, cut_mark=None):
615
+ """
616
+ Return the full relation tree, including self,
617
+ discarding cycles:
618
+
619
+ >>> from nltk.corpus import wordnet as wn
620
+ >>> from pprint import pprint
621
+ >>> computer = wn.synset('computer.n.01')
622
+ >>> topic = lambda s:s.topic_domains()
623
+ >>> pprint(computer.tree(topic))
624
+ [Synset('computer.n.01'), [Synset('computer_science.n.01')]]
625
+
626
+ UserWarning: Discarded redundant search for Synset('computer.n.01') at depth -3
627
+
628
+
629
+ But keep duplicate branches (from 'animal.n.01' to 'entity.n.01'):
630
+
631
+ >>> dog = wn.synset('dog.n.01')
632
+ >>> hyp = lambda s:s.hypernyms()
633
+ >>> pprint(dog.tree(hyp))
634
+ [Synset('dog.n.01'),
635
+ [Synset('canine.n.02'),
636
+ [Synset('carnivore.n.01'),
637
+ [Synset('placental.n.01'),
638
+ [Synset('mammal.n.01'),
639
+ [Synset('vertebrate.n.01'),
640
+ [Synset('chordate.n.01'),
641
+ [Synset('animal.n.01'),
642
+ [Synset('organism.n.01'),
643
+ [Synset('living_thing.n.01'),
644
+ [Synset('whole.n.02'),
645
+ [Synset('object.n.01'),
646
+ [Synset('physical_entity.n.01'),
647
+ [Synset('entity.n.01')]]]]]]]]]]]]],
648
+ [Synset('domestic_animal.n.01'),
649
+ [Synset('animal.n.01'),
650
+ [Synset('organism.n.01'),
651
+ [Synset('living_thing.n.01'),
652
+ [Synset('whole.n.02'),
653
+ [Synset('object.n.01'),
654
+ [Synset('physical_entity.n.01'), [Synset('entity.n.01')]]]]]]]]]
655
+ """
656
+
657
+ from nltk.util import acyclic_branches_depth_first
658
+
659
+ return acyclic_branches_depth_first(self, rel, depth, cut_mark)
660
+
661
+ def hypernym_paths(self):
662
+ """
663
+ Get the path(s) from this synset to the root, where each path is a
664
+ list of the synset nodes traversed on the way to the root.
665
+
666
+ :return: A list of lists, where each list gives the node sequence
667
+ connecting the initial ``Synset`` node and a root node.
668
+ """
669
+ paths = []
670
+
671
+ hypernyms = self.hypernyms() + self.instance_hypernyms()
672
+ if len(hypernyms) == 0:
673
+ paths = [[self]]
674
+
675
+ for hypernym in hypernyms:
676
+ for ancestor_list in hypernym.hypernym_paths():
677
+ ancestor_list.append(self)
678
+ paths.append(ancestor_list)
679
+ return paths
680
+
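+ # Hedged illustration (not part of the NLTK source); assumes WordNet 3.0,
+ # where dog.n.01 has two hypernyms (canine.n.02 and domestic_animal.n.01)
+ # and therefore two root-to-synset paths. Each path starts at the root and
+ # ends at the synset itself:
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> paths = wn.synset('dog.n.01').hypernym_paths()
+ # >>> len(paths)
+ # 2
+ # >>> (paths[0][0], paths[0][-1])
+ # (Synset('entity.n.01'), Synset('dog.n.01'))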
681
+ def common_hypernyms(self, other):
682
+ """
683
+ Find all synsets that are hypernyms of this synset and the
684
+ other synset.
685
+
686
+ :type other: Synset
687
+ :param other: other input synset.
688
+ :return: The synsets that are hypernyms of both synsets.
689
+ """
690
+ if not self._all_hypernyms:
691
+ self._all_hypernyms = {
692
+ self_synset
693
+ for self_synsets in self._iter_hypernym_lists()
694
+ for self_synset in self_synsets
695
+ }
696
+ if not other._all_hypernyms:
697
+ other._all_hypernyms = {
698
+ other_synset
699
+ for other_synsets in other._iter_hypernym_lists()
700
+ for other_synset in other_synsets
701
+ }
702
+ return list(self._all_hypernyms.intersection(other._all_hypernyms))
703
+
704
+ def lowest_common_hypernyms(self, other, simulate_root=False, use_min_depth=False):
705
+ """
706
+ Get a list of lowest synset(s) that both synsets have as a hypernym.
707
+ When `use_min_depth == False` this means that the synset which appears
708
+ as a hypernym of both `self` and `other` with the lowest maximum depth
709
+ is returned, or, if there are multiple such synsets at the same depth,
710
+ they are all returned.
711
+
712
+ However, if `use_min_depth == True` then the synset(s) which has/have
713
+ the lowest minimum depth and appear(s) in both paths is/are returned.
714
+
715
+ By setting the use_min_depth flag to True, the behavior of NLTK2 can be
716
+ preserved. This was changed in NLTK3 to give more accurate results in a
717
+ small set of cases, generally with synsets concerning people. (eg:
718
+ 'chef.n.01', 'fireman.n.01', etc.)
719
+
720
+ This method is an implementation of Ted Pedersen's "Lowest Common
721
+ Subsumer" method from the Perl Wordnet module. It can return either
722
+ "self" or "other" if they are a hypernym of the other.
723
+
724
+ :type other: Synset
725
+ :param other: other input synset
726
+ :type simulate_root: bool
727
+ :param simulate_root: The various verb taxonomies do not
728
+ share a single root which disallows this metric from working for
729
+ synsets that are not connected. This flag (False by default)
730
+ creates a fake root that connects all the taxonomies. Set it
731
+ to True to enable this behavior. For the noun taxonomy,
732
+ there is usually a default root except for WordNet version 1.6.
733
+ If you are using wordnet 1.6, a fake root will need to be added
734
+ for nouns as well.
735
+ :type use_min_depth: bool
736
+ :param use_min_depth: This setting mimics older (v2) behavior of NLTK
737
+ wordnet. If True, will use the min_depth function to calculate the
738
+ lowest common hypernyms. This is known to give strange results for
739
+ some synset pairs (eg: 'chef.n.01', 'fireman.n.01') but is retained
740
+ for backwards compatibility
741
+ :return: The synsets that are the lowest common hypernyms of both
742
+ synsets
743
+ """
744
+ synsets = self.common_hypernyms(other)
745
+ if simulate_root:
746
+ fake_synset = Synset(None)
747
+ fake_synset._name = "*ROOT*"
748
+ fake_synset.hypernyms = lambda: []
749
+ fake_synset.instance_hypernyms = lambda: []
750
+ synsets.append(fake_synset)
751
+
752
+ try:
753
+ if use_min_depth:
754
+ max_depth = max(s.min_depth() for s in synsets)
755
+ unsorted_lch = [s for s in synsets if s.min_depth() == max_depth]
756
+ else:
757
+ max_depth = max(s.max_depth() for s in synsets)
758
+ unsorted_lch = [s for s in synsets if s.max_depth() == max_depth]
759
+ return sorted(unsorted_lch)
760
+ except ValueError:
761
+ return []
762
+
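+ # Hedged illustration (not part of the NLTK source); assumes WordNet 3.0,
+ # where the deepest shared ancestor of dog.n.01 and cat.n.01 is
+ # carnivore.n.01:
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.synset('dog.n.01').lowest_common_hypernyms(wn.synset('cat.n.01'))
+ # [Synset('carnivore.n.01')]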
763
+ def hypernym_distances(self, distance=0, simulate_root=False):
764
+ """
765
+ Get the path(s) from this synset to the root, counting the distance
766
+ of each node from the initial node on the way. A set of
767
+ (synset, distance) tuples is returned.
768
+
769
+ :type distance: int
770
+ :param distance: the distance (number of edges) from this hypernym to
771
+ the original hypernym ``Synset`` on which this method was called.
772
+ :return: A set of ``(Synset, int)`` tuples where each ``Synset`` is
773
+ a hypernym of the first ``Synset``.
774
+ """
775
+ distances = {(self, distance)}
776
+ for hypernym in self._hypernyms() + self._instance_hypernyms():
777
+ distances |= hypernym.hypernym_distances(distance + 1, simulate_root=False)
778
+ if simulate_root:
779
+ fake_synset = Synset(None)
780
+ fake_synset._name = "*ROOT*"
781
+ fake_synset_distance = max(distances, key=itemgetter(1))[1]
782
+ distances.add((fake_synset, fake_synset_distance + 1))
783
+ return distances
784
+
785
+ def _shortest_hypernym_paths(self, simulate_root):
786
+ if self._name == "*ROOT*":
787
+ return {self: 0}
788
+
789
+ queue = deque([(self, 0)])
790
+ path = {}
791
+
792
+ while queue:
793
+ s, depth = queue.popleft()
794
+ if s in path:
795
+ continue
796
+ path[s] = depth
797
+
798
+ depth += 1
799
+ queue.extend((hyp, depth) for hyp in s._hypernyms())
800
+ queue.extend((hyp, depth) for hyp in s._instance_hypernyms())
801
+
802
+ if simulate_root:
803
+ fake_synset = Synset(None)
804
+ fake_synset._name = "*ROOT*"
805
+ path[fake_synset] = max(path.values()) + 1
806
+
807
+ return path
808
+
809
+ def shortest_path_distance(self, other, simulate_root=False):
810
+ """
811
+ Returns the distance of the shortest path linking the two synsets (if
812
+ one exists). For each synset, all the ancestor nodes and their
813
+ distances are recorded and compared. The ancestor node common to both
814
+ synsets that can be reached with the minimum number of traversals is
815
+ used. If no ancestor nodes are common, None is returned. If a node is
816
+ compared with itself 0 is returned.
817
+
818
+ :type other: Synset
819
+ :param other: The Synset to which the shortest path will be found.
820
+ :return: The number of edges in the shortest path connecting the two
821
+ nodes, or None if no path exists.
822
+ """
823
+
824
+ if self == other:
825
+ return 0
826
+
827
+ dist_dict1 = self._shortest_hypernym_paths(simulate_root)
828
+ dist_dict2 = other._shortest_hypernym_paths(simulate_root)
829
+
830
+ # For each ancestor synset common to both subject synsets, find the
831
+ # connecting path length. Return the shortest of these.
832
+
833
+ inf = float("inf")
834
+ path_distance = inf
835
+ for synset, d1 in dist_dict1.items():
836
+ d2 = dist_dict2.get(synset, inf)
837
+ path_distance = min(path_distance, d1 + d2)
838
+
839
+ return None if math.isinf(path_distance) else path_distance
840
+
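+ # Hedged illustration (not part of the NLTK source); assumes WordNet 3.0.
+ # dog.n.01 and cat.n.01 are linked through carnivore.n.01, two edges up
+ # from each, giving a shortest path of 4 edges:
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> dog, cat = wn.synset('dog.n.01'), wn.synset('cat.n.01')
+ # >>> dog.shortest_path_distance(dog)
+ # 0
+ # >>> dog.shortest_path_distance(cat)
+ # 4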
841
+ # interface to similarity methods
842
+ def path_similarity(self, other, verbose=False, simulate_root=True):
843
+ """
844
+ Path Distance Similarity:
845
+ Return a score denoting how similar two word senses are, based on the
846
+ shortest path that connects the senses in the is-a (hypernym/hyponym)
847
+ taxonomy. The score is in the range 0 to 1, except in those cases where
848
+ a path cannot be found (will only be true for verbs as there are many
849
+ distinct verb taxonomies), in which case None is returned. A score of
850
+ 1 represents identity i.e. comparing a sense with itself will return 1.
851
+
852
+ :type other: Synset
853
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
854
+ :type simulate_root: bool
855
+ :param simulate_root: The various verb taxonomies do not
856
+ share a single root which disallows this metric from working for
857
+ synsets that are not connected. This flag (True by default)
858
+ creates a fake root that connects all the taxonomies. Set it
859
+ to false to disable this behavior. For the noun taxonomy,
860
+ there is usually a default root except for WordNet version 1.6.
861
+ If you are using wordnet 1.6, a fake root will be added for nouns
862
+ as well.
863
+ :return: A score denoting the similarity of the two ``Synset`` objects,
864
+ normally between 0 and 1. None is returned if no connecting path
865
+ could be found. 1 is returned if a ``Synset`` is compared with
866
+ itself.
867
+ """
868
+
869
+ distance = self.shortest_path_distance(
870
+ other,
871
+ simulate_root=simulate_root and (self._needs_root() or other._needs_root()),
872
+ )
873
+ if distance is None or distance < 0:
874
+ return None
875
+ return 1.0 / (distance + 1)
876
+
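+ # Hedged illustration (not part of the NLTK source): the score is
+ # 1 / (shortest_path_distance + 1), so identity gives 1.0 and, assuming
+ # WordNet 3.0 where dog.n.01 and cat.n.01 are 4 edges apart, the pair
+ # scores 1 / 5 = 0.2:
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> dog, cat = wn.synset('dog.n.01'), wn.synset('cat.n.01')
+ # >>> dog.path_similarity(dog)
+ # 1.0
+ # >>> dog.path_similarity(cat)
+ # 0.2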
877
+ def lch_similarity(self, other, verbose=False, simulate_root=True):
878
+ """
879
+ Leacock Chodorow Similarity:
880
+ Return a score denoting how similar two word senses are, based on the
881
+ shortest path that connects the senses (as above) and the maximum depth
882
+ of the taxonomy in which the senses occur. The relationship is given as
883
+ -log(p/2d) where p is the shortest path length and d is the taxonomy
884
+ depth.
885
+
886
+ :type other: Synset
887
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
888
+ :type simulate_root: bool
889
+ :param simulate_root: The various verb taxonomies do not
890
+ share a single root which disallows this metric from working for
891
+ synsets that are not connected. This flag (True by default)
892
+ creates a fake root that connects all the taxonomies. Set it
893
+ to false to disable this behavior. For the noun taxonomy,
894
+ there is usually a default root except for WordNet version 1.6.
895
+ If you are using wordnet 1.6, a fake root will be added for nouns
896
+ as well.
897
+ :return: A score denoting the similarity of the two ``Synset`` objects,
898
+ normally greater than 0. None is returned if no connecting path
899
+ could be found. If a ``Synset`` is compared with itself, the
900
+ maximum score is returned, which varies depending on the taxonomy
901
+ depth.
902
+ """
903
+
904
+ if self._pos != other._pos:
905
+ raise WordNetError(
906
+ "Computing the lch similarity requires "
907
+ "%s and %s to have the same part of speech." % (self, other)
908
+ )
909
+
910
+ need_root = self._needs_root()
911
+
912
+ if self._pos not in self._wordnet_corpus_reader._max_depth:
913
+ self._wordnet_corpus_reader._compute_max_depth(self._pos, need_root)
914
+
915
+ depth = self._wordnet_corpus_reader._max_depth[self._pos]
916
+
917
+ distance = self.shortest_path_distance(
918
+ other, simulate_root=simulate_root and need_root
919
+ )
920
+
921
+ if distance is None or distance < 0 or depth == 0:
922
+ return None
923
+ return -math.log((distance + 1) / (2.0 * depth))
924
+
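+ # Hedged worked example (not part of the NLTK source): with p the shortest
+ # path length plus one and d the maximum taxonomy depth, the score is
+ # -log(p / 2d). Assuming WordNet 3.0 (noun taxonomy depth d = 19) and a
+ # dog.n.01/cat.n.01 distance of 4, the score is -log(5 / 38) ~= 2.03:
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> dog, cat = wn.synset('dog.n.01'), wn.synset('cat.n.01')
+ # >>> round(dog.lch_similarity(cat), 2)  # doctest: +SKIP
+ # 2.03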
925
+ def wup_similarity(self, other, verbose=False, simulate_root=True):
926
+ """
927
+ Wu-Palmer Similarity:
928
+ Return a score denoting how similar two word senses are, based on the
929
+ depth of the two senses in the taxonomy and that of their Least Common
930
+ Subsumer (most specific ancestor node). Previously, the scores computed
931
+ by this implementation did _not_ always agree with those given by
932
+ Pedersen's Perl implementation of WordNet Similarity. However, with
933
+ the addition of the simulate_root flag (see below), the scores for
934
+ verbs now almost always agree but not always for nouns.
935
+
936
+ The LCS does not necessarily feature in the shortest path connecting
937
+ the two senses, as it is by definition the common ancestor deepest in
938
+ the taxonomy, not closest to the two senses. Typically, however, it
939
+ will so feature. Where multiple candidates for the LCS exist, that
940
+ whose shortest path to the root node is the longest will be selected.
941
+ Where the LCS has multiple paths to the root, the longer path is used
942
+ for the purposes of the calculation.
943
+
944
+ :type other: Synset
945
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
946
+ :type simulate_root: bool
947
+ :param simulate_root: The various verb taxonomies do not
948
+ share a single root which disallows this metric from working for
949
+ synsets that are not connected. This flag (True by default)
950
+ creates a fake root that connects all the taxonomies. Set it
951
+ to false to disable this behavior. For the noun taxonomy,
952
+ there is usually a default root except for WordNet version 1.6.
953
+ If you are using wordnet 1.6, a fake root will be added for nouns
954
+ as well.
955
+ :return: A float score denoting the similarity of the two ``Synset``
956
+ objects, normally greater than zero. If no connecting path between
957
+ the two senses can be found, None is returned.
958
+
959
+ """
960
+ need_root = self._needs_root() or other._needs_root()
961
+
962
+ # Note that to preserve behavior from NLTK2 we set use_min_depth=True
963
+ # It is possible that more accurate results could be obtained by
964
+ # removing this setting and it should be tested later on
965
+ subsumers = self.lowest_common_hypernyms(
966
+ other, simulate_root=simulate_root and need_root, use_min_depth=True
967
+ )
968
+
969
+ # If no LCS was found return None
970
+ if len(subsumers) == 0:
971
+ return None
972
+
973
+ subsumer = self if self in subsumers else subsumers[0]
974
+
975
+ # Get the longest path from the LCS to the root,
976
+ # including a correction:
977
+ # - add one because the calculations include both the start and end
978
+ # nodes
979
+ depth = subsumer.max_depth() + 1
980
+
981
+ # Note: No need for an additional add-one correction for non-nouns
982
+ # to account for an imaginary root node because that is now
983
+ # automatically handled by simulate_root
984
+ # if subsumer._pos != NOUN:
985
+ # depth += 1
986
+
987
+ # Get the shortest path from the LCS to each of the synsets it is
988
+ # subsuming. Add this to the LCS path length to get the path
989
+ # length from each synset to the root.
990
+ len1 = self.shortest_path_distance(
991
+ subsumer, simulate_root=simulate_root and need_root
992
+ )
993
+ len2 = other.shortest_path_distance(
994
+ subsumer, simulate_root=simulate_root and need_root
995
+ )
996
+ if len1 is None or len2 is None:
997
+ return None
998
+ len1 += depth
999
+ len2 += depth
1000
+ return (2.0 * depth) / (len1 + len2)
1001
+
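+ # Hedged worked example (not part of the NLTK source): the score is
+ # 2 * depth / (len1 + len2). Assuming WordNet 3.0, the LCS of dog.n.01 and
+ # cat.n.01 is carnivore.n.01 (max_depth 11, so depth = 12 with the
+ # start-node correction); dog and cat are each 2 edges below it, so the
+ # score is 24 / (14 + 14) ~= 0.86:
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> dog, cat = wn.synset('dog.n.01'), wn.synset('cat.n.01')
+ # >>> round(dog.wup_similarity(cat), 2)  # doctest: +SKIP
+ # 0.86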
1002
+ def res_similarity(self, other, ic, verbose=False):
1003
+ """
1004
+ Resnik Similarity:
1005
+ Return a score denoting how similar two word senses are, based on the
1006
+ Information Content (IC) of the Least Common Subsumer (most specific
1007
+ ancestor node).
1008
+
1009
+ :type other: Synset
1010
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
1011
+ :type ic: dict
1012
+ :param ic: an information content object (as returned by
1013
+ ``nltk.corpus.wordnet_ic.ic()``).
1014
+ :return: A float score denoting the similarity of the two ``Synset``
1015
+ objects. Synsets whose LCS is the root node of the taxonomy will
1016
+ have a score of 0 (e.g. N['dog'][0] and N['table'][0]).
1017
+ """
1018
+
1019
+ ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
1020
+ return lcs_ic
1021
+
1022
+ def jcn_similarity(self, other, ic, verbose=False):
1023
+ """
1024
+ Jiang-Conrath Similarity:
1025
+ Return a score denoting how similar two word senses are, based on the
1026
+ Information Content (IC) of the Least Common Subsumer (most specific
1027
+ ancestor node) and that of the two input Synsets. The relationship is
1028
+ given by the equation 1 / (IC(s1) + IC(s2) - 2 * IC(lcs)).
1029
+
1030
+ :type other: Synset
1031
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
1032
+ :type ic: dict
1033
+ :param ic: an information content object (as returned by
1034
+ ``nltk.corpus.wordnet_ic.ic()``).
1035
+ :return: A float score denoting the similarity of the two ``Synset``
1036
+ objects.
1037
+ """
1038
+
1039
+ if self == other:
1040
+ return _INF
1041
+
1042
+ ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
1043
+
1044
+ # If either of the input synsets are the root synset, or have a
1045
+ # frequency of 0 (sparse data problem), return 0.
1046
+ if ic1 == 0 or ic2 == 0:
1047
+ return 0
1048
+
1049
+ ic_difference = ic1 + ic2 - 2 * lcs_ic
1050
+
1051
+ if ic_difference == 0:
1052
+ return _INF
1053
+
1054
+ return 1 / ic_difference
1055
+
1056
+ def lin_similarity(self, other, ic, verbose=False):
1057
+ """
1058
+ Lin Similarity:
1059
+ Return a score denoting how similar two word senses are, based on the
1060
+ Information Content (IC) of the Least Common Subsumer (most specific
1061
+ ancestor node) and that of the two input Synsets. The relationship is
1062
+ given by the equation 2 * IC(lcs) / (IC(s1) + IC(s2)).
1063
+
1064
+ :type other: Synset
1065
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
1066
+ :type ic: dict
1067
+ :param ic: an information content object (as returned by
1068
+ ``nltk.corpus.wordnet_ic.ic()``).
1069
+ :return: A float score denoting the similarity of the two ``Synset``
1070
+ objects, in the range 0 to 1.
1071
+ """
1072
+
1073
+ ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
1074
+ return (2.0 * lcs_ic) / (ic1 + ic2)
1075
+
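+ # Hedged usage sketch for the IC-based measures above (res, jcn, lin); not
+ # part of the NLTK source. It assumes the 'wordnet_ic' corpus with the
+ # Brown-corpus counts is installed; exact values depend on the IC file, so
+ # the output shown is only indicative:
+ #
+ # >>> from nltk.corpus import wordnet as wn, wordnet_ic
+ # >>> brown_ic = wordnet_ic.ic('ic-brown.dat')
+ # >>> dog, cat = wn.synset('dog.n.01'), wn.synset('cat.n.01')
+ # >>> round(dog.lin_similarity(cat, brown_ic), 2)  # doctest: +SKIP
+ # 0.88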
1076
+ def _iter_hypernym_lists(self):
1077
+ """
1078
+ :return: An iterator over lists of ``Synset`` objects that are either proper
1079
+ hypernyms or instance hypernyms of the synset.
1080
+ """
1081
+ todo = [self]
1082
+ seen = set()
1083
+ while todo:
1084
+ for synset in todo:
1085
+ seen.add(synset)
1086
+ yield todo
1087
+ todo = [
1088
+ hypernym
1089
+ for synset in todo
1090
+ for hypernym in (synset.hypernyms() + synset.instance_hypernyms())
1091
+ if hypernym not in seen
1092
+ ]
1093
+
1094
+ def __repr__(self):
1095
+ return f"{type(self).__name__}('{self._name}')"
1096
+
1097
+ def _related(self, relation_symbol, sort=True):
1098
+ get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset
1099
+ if relation_symbol not in self._pointers:
1100
+ return []
1101
+ pointer_tuples = self._pointers[relation_symbol]
1102
+ r = [get_synset(pos, offset) for pos, offset in pointer_tuples]
1103
+ if sort:
1104
+ r.sort()
1105
+ return r
1106
+
1107
+
1108
+ ######################################################################
1109
+ # WordNet Corpus Reader
1110
+ ######################################################################
1111
+
1112
+
1113
+ class WordNetCorpusReader(CorpusReader):
1114
+ """
1115
+ A corpus reader used to access wordnet or its variants.
1116
+ """
1117
+
1118
+ _ENCODING = "utf8"
1119
+
1120
+ # { Part-of-speech constants
1121
+ ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v"
1122
+ # }
1123
+
1124
+ # { Filename constants
1125
+ _FILEMAP = {ADJ: "adj", ADV: "adv", NOUN: "noun", VERB: "verb"}
1126
+ # }
1127
+
1128
+ # { Part of speech constants
1129
+ _pos_numbers = {NOUN: 1, VERB: 2, ADJ: 3, ADV: 4, ADJ_SAT: 5}
1130
+ _pos_names = dict(tup[::-1] for tup in _pos_numbers.items())
1131
+ # }
1132
+
1133
+ #: A list of file identifiers for all the fileids used by this
1134
+ #: corpus reader.
1135
+ _FILES = (
1136
+ "cntlist.rev",
1137
+ "lexnames",
1138
+ "index.sense",
1139
+ "index.adj",
1140
+ "index.adv",
1141
+ "index.noun",
1142
+ "index.verb",
1143
+ "data.adj",
1144
+ "data.adv",
1145
+ "data.noun",
1146
+ "data.verb",
1147
+ "adj.exc",
1148
+ "adv.exc",
1149
+ "noun.exc",
1150
+ "verb.exc",
1151
+ )
1152
+
1153
+ def __init__(self, root, omw_reader):
1154
+ """
1155
+ Construct a new wordnet corpus reader, with the given root
1156
+ directory.
1157
+ """
1158
+
1159
+ super().__init__(root, self._FILES, encoding=self._ENCODING)
1160
+
1161
+ # An index that provides the file offset
1162
+ # Map from lemma -> pos -> synset_index -> offset
1163
+ self._lemma_pos_offset_map = defaultdict(dict)
1164
+
1165
+ # A cache so we don't have to reconstruct synsets
1166
+ # Map from pos -> offset -> synset
1167
+ self._synset_offset_cache = defaultdict(dict)
1168
+
1169
+ # A lookup for the maximum depth of each part of speech. Useful for
1170
+ # the lch similarity metric.
1171
+ self._max_depth = defaultdict(dict)
1172
+
1173
+ # Corpus reader containing omw data.
1174
+ self._omw_reader = omw_reader
1175
+
1176
+ # Corpus reader containing extended_omw data.
1177
+ self._exomw_reader = None
1178
+
1179
+ self.provenances = defaultdict(str)
1180
+ self.provenances["eng"] = ""
1181
+
1182
+ if self._omw_reader is None:
1183
+ warnings.warn(
1184
+ "The multilingual functions are not available with this Wordnet version"
1185
+ )
1186
+
1187
+ self.omw_langs = set()
1188
+
1189
+ # A cache to store the wordnet data of multiple languages
1190
+ self._lang_data = defaultdict(list)
1191
+
1192
+ self._data_file_map = {}
1193
+ self._exception_map = {}
1194
+ self._lexnames = []
1195
+ self._key_count_file = None
1196
+ self._key_synset_file = None
1197
+
1198
+ # Load the lexnames
1199
+ with self.open("lexnames") as fp:
1200
+ for i, line in enumerate(fp):
1201
+ index, lexname, _ = line.split()
1202
+ assert int(index) == i
1203
+ self._lexnames.append(lexname)
1204
+
1205
+ # Load the indices for lemmas and synset offsets
1206
+ self._load_lemma_pos_offset_map()
1207
+
1208
+ # load the exception file data into memory
1209
+ self._load_exception_map()
1210
+
1211
+ self.nomap = []
1212
+ self.splits = {}
1213
+
1214
+ # map from WordNet 3.0 for OMW data
1215
+ self.map30 = self.map_wn30()
1216
+
1217
+ # Language data attributes
1218
+ self.lg_attrs = ["lemma", "none", "def", "exe"]
1219
+
1220
+ def index_sense(self, version=None):
1221
+ """Read sense key to synset id mapping from index.sense file in corpus directory"""
1222
+ fn = "index.sense"
1223
+ if version:
1224
+ from nltk.corpus import CorpusReader, LazyCorpusLoader
1225
+
1226
+ ixreader = LazyCorpusLoader(version, CorpusReader, r".*/" + fn)
1227
+ else:
1228
+ ixreader = self
1229
+ with ixreader.open(fn) as fp:
1230
+ sensekey_map = {}
1231
+ for line in fp:
1232
+ fields = line.strip().split()
1233
+ sensekey = fields[0]
1234
+ pos = self._pos_names[int(sensekey.split("%")[1].split(":")[0])]
1235
+ sensekey_map[sensekey] = f"{fields[1]}-{pos}"
1236
+ return sensekey_map
1237
+
1238
+ def map_to_many(self):
1239
+ sensekey_map1 = self.index_sense("wordnet")
1240
+ sensekey_map2 = self.index_sense()
1241
+ synset_to_many = {}
1242
+ for synsetid in set(sensekey_map1.values()):
1243
+ synset_to_many[synsetid] = []
1244
+ for sensekey in set(sensekey_map1.keys()).intersection(
1245
+ set(sensekey_map2.keys())
1246
+ ):
1247
+ source = sensekey_map1[sensekey]
1248
+ target = sensekey_map2[sensekey]
1249
+ synset_to_many[source].append(target)
1250
+ return synset_to_many
1251
+
1252
+ def map_to_one(self):
1253
+ synset_to_many = self.map_to_many()
1254
+ synset_to_one = {}
1255
+ for source in synset_to_many:
1256
+ candidates_bag = synset_to_many[source]
1257
+ if candidates_bag:
1258
+ candidates_set = set(candidates_bag)
1259
+ if len(candidates_set) == 1:
1260
+ target = candidates_bag[0]
1261
+ else:
1262
+ counts = []
1263
+ for candidate in candidates_set:
1264
+ counts.append((candidates_bag.count(candidate), candidate))
1265
+ self.splits[source] = counts
1266
+ target = max(counts)[1]
1267
+ synset_to_one[source] = target
1268
+ if source[-1] == "s":
1269
+ # Add a mapping from "a" to target for applications like omw,
1270
+ # where only Lithuanian and Slovak use the "s" ss_type.
1271
+ synset_to_one[f"{source[:-1]}a"] = target
1272
+ else:
1273
+ self.nomap.append(source)
1274
+ return synset_to_one
1275
+
1276
+ def map_wn30(self):
1277
+ """Mapping from Wordnet 3.0 to currently loaded Wordnet version"""
1278
+ if self.get_version() == "3.0":
1279
+ return None
1280
+ else:
1281
+ return self.map_to_one()
1282
+
1283
+ # Open Multilingual WordNet functions, contributed by
1284
+ # Nasruddin A’aidil Shari, Sim Wei Ying Geraldine, and Soe Lynn
1285
+
1286
+ def of2ss(self, of):
1287
+ """take an id and return the synsets"""
1288
+ return self.synset_from_pos_and_offset(of[-1], int(of[:8]))
1289
+
1290
+ def ss2of(self, ss):
1291
+ """return the ID of the synset"""
1292
+ if ss:
1293
+ return f"{ss.offset():08d}-{ss.pos()}"
1294
+
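+ # Hedged illustration (not part of the NLTK source): ss2of() and of2ss()
+ # are inverses, mapping between Synset objects and their
+ # "<8-digit offset>-<pos>" identifiers:
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> ss = wn.synset('dog.n.01')
+ # >>> wn.of2ss(wn.ss2of(ss)) == ss
+ # True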
1295
+ def _load_lang_data(self, lang):
1296
+ """load the wordnet data of the requested language from the file to
1297
+ the cache, _lang_data"""
1298
+
1299
+ if lang in self._lang_data:
1300
+ return
1301
+
1302
+ if self._omw_reader and not self.omw_langs:
1303
+ self.add_omw()
1304
+
1305
+ if lang not in self.langs():
1306
+ raise WordNetError("Language is not supported.")
1307
+
1308
+ if self._exomw_reader and lang not in self.omw_langs:
1309
+ reader = self._exomw_reader
1310
+ else:
1311
+ reader = self._omw_reader
1312
+
1313
+ prov = self.provenances[lang]
1314
+ if prov in ["cldr", "wikt"]:
1315
+ prov2 = prov
1316
+ else:
1317
+ prov2 = "data"
1318
+
1319
+ with reader.open(f"{prov}/wn-{prov2}-{lang.split('_')[0]}.tab") as fp:
1320
+ self.custom_lemmas(fp, lang)
1321
+ self.disable_custom_lemmas(lang)
1322
+
1323
+ def add_provs(self, reader):
1324
+ """Add languages from Multilingual Wordnet to the provenance dictionary"""
1325
+ fileids = reader.fileids()
1326
+ for fileid in fileids:
1327
+ prov, langfile = os.path.split(fileid)
1328
+ file_name, file_extension = os.path.splitext(langfile)
1329
+ if file_extension == ".tab":
1330
+ lang = file_name.split("-")[-1]
1331
+ if lang in self.provenances or prov in ["cldr", "wikt"]:
1332
+ # We already have another resource for this lang,
1333
+ # so we need to further specify the lang id:
1334
+ lang = f"{lang}_{prov}"
1335
+ self.provenances[lang] = prov
1336
+
1337
+ def add_omw(self):
1338
+ self.add_provs(self._omw_reader)
1339
+ self.omw_langs = set(self.provenances.keys())
1340
+
1341
+ def add_exomw(self):
1342
+ """
1343
+ Add languages from Extended OMW
1344
+
1345
+ >>> import nltk
1346
+ >>> from nltk.corpus import wordnet as wn
1347
+ >>> wn.add_exomw()
1348
+ >>> print(wn.synset('intrinsically.r.01').lemmas(lang="eng_wikt"))
1349
+ [Lemma('intrinsically.r.01.per_se'), Lemma('intrinsically.r.01.as_such')]
1350
+ """
1351
+ from nltk.corpus import extended_omw
1352
+
1353
+ self.add_omw()
1354
+ self._exomw_reader = extended_omw
1355
+ self.add_provs(self._exomw_reader)
1356
+
1357
+ def langs(self):
1358
+ """return a list of languages supported by Multilingual Wordnet"""
1359
+ return list(self.provenances.keys())
1360
+
1361
+ def _load_lemma_pos_offset_map(self):
1362
+ for suffix in self._FILEMAP.values():
1363
+
1364
+ # parse each line of the file (ignoring comment lines)
1365
+ with self.open("index.%s" % suffix) as fp:
1366
+ for i, line in enumerate(fp):
1367
+ if line.startswith(" "):
1368
+ continue
1369
+
1370
+ _iter = iter(line.split())
1371
+
1372
+ def _next_token():
1373
+ return next(_iter)
1374
+
1375
+ try:
1376
+
1377
+ # get the lemma and part-of-speech
1378
+ lemma = _next_token()
1379
+ pos = _next_token()
1380
+
1381
+ # get the number of synsets for this lemma
1382
+ n_synsets = int(_next_token())
1383
+ assert n_synsets > 0
1384
+
1385
+ # get and ignore the pointer symbols for all synsets of
1386
+ # this lemma
1387
+ n_pointers = int(_next_token())
1388
+ [_next_token() for _ in range(n_pointers)]
1389
+
1390
+ # same as number of synsets
1391
+ n_senses = int(_next_token())
1392
+ assert n_synsets == n_senses
1393
+
1394
+ # get and ignore number of senses ranked according to
1395
+ # frequency
1396
+ _next_token()
1397
+
1398
+ # get synset offsets
1399
+ synset_offsets = [int(_next_token()) for _ in range(n_synsets)]
1400
+
1401
+ # raise more informative error with file name and line number
1402
+ except (AssertionError, ValueError) as e:
1403
+ tup = ("index.%s" % suffix), (i + 1), e
1404
+ raise WordNetError("file %s, line %i: %s" % tup) from e
1405
+
1406
+ # map lemmas and parts of speech to synsets
1407
+ self._lemma_pos_offset_map[lemma][pos] = synset_offsets
1408
+ if pos == ADJ:
1409
+ self._lemma_pos_offset_map[lemma][ADJ_SAT] = synset_offsets
1410
+
1411
+ def _load_exception_map(self):
1412
+ # load the exception file data into memory
1413
+ for pos, suffix in self._FILEMAP.items():
1414
+ self._exception_map[pos] = {}
1415
+ with self.open("%s.exc" % suffix) as fp:
1416
+ for line in fp:
1417
+ terms = line.split()
1418
+ self._exception_map[pos][terms[0]] = terms[1:]
1419
+ self._exception_map[ADJ_SAT] = self._exception_map[ADJ]
1420
+
1421
+ def _compute_max_depth(self, pos, simulate_root):
1422
+ """
1423
+ Compute the max depth for the given part of speech. This is
1424
+ used by the lch similarity metric.
1425
+ """
1426
+ depth = 0
1427
+ for ii in self.all_synsets(pos):
1428
+ try:
1429
+ depth = max(depth, ii.max_depth())
1430
+ except RuntimeError:
1431
+ print(ii)
1432
+ if simulate_root:
1433
+ depth += 1
1434
+ self._max_depth[pos] = depth
1435
+
1436
+ def get_version(self):
1437
+ fh = self._data_file(ADJ)
1438
+ fh.seek(0)
1439
+ for line in fh:
1440
+ match = re.search(r"Word[nN]et (\d+|\d+\.\d+) Copyright", line)
1441
+ if match is not None:
1442
+ version = match.group(1)
1443
+ fh.seek(0)
1444
+ return version
1445
+
1446
+ #############################################################
1447
+ # Loading Lemmas
1448
+ #############################################################
1449
+
1450
+ def lemma(self, name, lang="eng"):
1451
+ """Return lemma object that matches the name"""
1452
+ # cannot simply split on first '.',
1453
+ # e.g.: '.45_caliber.a.01..45_caliber'
1454
+ separator = SENSENUM_RE.search(name).end()
1455
+
1456
+ synset_name, lemma_name = name[: separator - 1], name[separator:]
1457
+
1458
+ synset = self.synset(synset_name)
1459
+ for lemma in synset.lemmas(lang):
1460
+ if lemma._name == lemma_name:
1461
+ return lemma
1462
+ raise WordNetError(f"No lemma {lemma_name!r} in {synset_name!r}")
1463
+
1464
+ def lemma_from_key(self, key):
1465
+ # Keys are case sensitive and always lower-case
1466
+ key = key.lower()
1467
+
1468
+ lemma_name, lex_sense = key.split("%")
1469
+ pos_number, lexname_index, lex_id, _, _ = lex_sense.split(":")
1470
+ pos = self._pos_names[int(pos_number)]
1471
+
1472
+ # open the key -> synset file if necessary
1473
+ if self._key_synset_file is None:
1474
+ self._key_synset_file = self.open("index.sense")
1475
+
1476
+ # Find the synset for the lemma.
1477
+ synset_line = _binary_search_file(self._key_synset_file, key)
1478
+ if not synset_line:
1479
+ raise WordNetError("No synset found for key %r" % key)
1480
+ offset = int(synset_line.split()[1])
1481
+ synset = self.synset_from_pos_and_offset(pos, offset)
1482
+ # return the corresponding lemma
1483
+ for lemma in synset._lemmas:
1484
+ if lemma._key == key:
1485
+ return lemma
1486
+ raise WordNetError("No lemma found for for key %r" % key)
1487
+
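+ # Hedged illustration (not part of the NLTK source): a lemma's sense key,
+ # as returned by Lemma.key(), round-trips through lemma_from_key():
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> key = wn.lemma('dog.n.01.dog').key()
+ # >>> wn.lemma_from_key(key)
+ # Lemma('dog.n.01.dog')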
1488
+ #############################################################
1489
+ # Loading Synsets
1490
+ #############################################################
1491
+ def synset(self, name):
1492
+ # split name into lemma, part of speech and synset number
1493
+ lemma, pos, synset_index_str = name.lower().rsplit(".", 2)
1494
+ synset_index = int(synset_index_str) - 1
1495
+
1496
+ # get the offset for this synset
1497
+ try:
1498
+ offset = self._lemma_pos_offset_map[lemma][pos][synset_index]
1499
+ except KeyError as e:
1500
+ raise WordNetError(f"No lemma {lemma!r} with part of speech {pos!r}") from e
1501
+ except IndexError as e:
1502
+ n_senses = len(self._lemma_pos_offset_map[lemma][pos])
1503
+ raise WordNetError(
1504
+ f"Lemma {lemma!r} with part of speech {pos!r} only "
1505
+ f"has {n_senses} {'sense' if n_senses == 1 else 'senses'}"
1506
+ ) from e
1507
+
1508
+ # load synset information from the appropriate file
1509
+ synset = self.synset_from_pos_and_offset(pos, offset)
1510
+
1511
+ # some basic sanity checks on loaded attributes
1512
+ if pos == "s" and synset._pos == "a":
1513
+ message = (
1514
+ "Adjective satellite requested but only plain "
1515
+ "adjective found for lemma %r"
1516
+ )
1517
+ raise WordNetError(message % lemma)
1518
+ assert synset._pos == pos or (pos == "a" and synset._pos == "s")
1519
+
1520
+ # Return the synset object.
1521
+ return synset
1522
+
1523
+ def _data_file(self, pos):
1524
+ """
1525
+ Return an open file pointer for the data file for the given
1526
+ part of speech.
1527
+ """
1528
+ if pos == ADJ_SAT:
1529
+ pos = ADJ
1530
+ if self._data_file_map.get(pos) is None:
1531
+ fileid = "data.%s" % self._FILEMAP[pos]
1532
+ self._data_file_map[pos] = self.open(fileid)
1533
+ return self._data_file_map[pos]
1534
+
1535
+ def synset_from_pos_and_offset(self, pos, offset):
1536
+ """
1537
+ - pos: The synset's part of speech, matching one of the module level
1538
+ attributes ADJ, ADJ_SAT, ADV, NOUN or VERB ('a', 's', 'r', 'n', or 'v').
1539
+ - offset: The byte offset of this synset in the WordNet dict file
1540
+ for this pos.
1541
+
1542
+ >>> from nltk.corpus import wordnet as wn
1543
+ >>> print(wn.synset_from_pos_and_offset('n', 1740))
1544
+ Synset('entity.n.01')
1545
+ """
1546
+ # Check to see if the synset is in the cache
1547
+ if offset in self._synset_offset_cache[pos]:
1548
+ return self._synset_offset_cache[pos][offset]
1549
+
1550
+ data_file = self._data_file(pos)
1551
+ data_file.seek(offset)
1552
+ data_file_line = data_file.readline()
1553
+ # If valid, the offset equals the 8-digit 0-padded integer found at the start of the line:
1554
+ line_offset = data_file_line[:8]
1555
+ if (
1556
+ line_offset.isalnum()
1557
+ and line_offset == f"{'0'*(8-len(str(offset)))}{str(offset)}"
1558
+ ):
1559
+ synset = self._synset_from_pos_and_line(pos, data_file_line)
1560
+ assert synset._offset == offset
1561
+ self._synset_offset_cache[pos][offset] = synset
1562
+ else:
1563
+ synset = None
1564
+ warnings.warn(f"No WordNet synset found for pos={pos} at offset={offset}.")
1565
+ data_file.seek(0)
1566
+ return synset
1567
+
1568
+ @deprecated("Use public method synset_from_pos_and_offset() instead")
1569
+ def _synset_from_pos_and_offset(self, *args, **kwargs):
1570
+ """
1571
+ Hack to help people like the readers of
1572
+ https://stackoverflow.com/a/27145655/1709587
1573
+ who were using this function before it was officially a public method
1574
+ """
1575
+ return self.synset_from_pos_and_offset(*args, **kwargs)
1576
+
1577
+ def _synset_from_pos_and_line(self, pos, data_file_line):
1578
+ # Construct a new (empty) synset.
1579
+ synset = Synset(self)
1580
+
1581
+ # parse the entry for this synset
1582
+ try:
1583
+
1584
+ # parse out the definitions and examples from the gloss
1585
+ columns_str, gloss = data_file_line.strip().split("|")
1586
+ definition = re.sub(r"[\"].*?[\"]", "", gloss).strip()
1587
+ examples = re.findall(r'"([^"]*)"', gloss)
1588
+ for example in examples:
1589
+ synset._examples.append(example)
1590
+
1591
+ synset._definition = definition.strip("; ")
1592
+
1593
+ # split the other info into fields
1594
+ _iter = iter(columns_str.split())
1595
+
1596
+ def _next_token():
1597
+ return next(_iter)
1598
+
1599
+ # get the offset
1600
+ synset._offset = int(_next_token())
1601
+
1602
+ # determine the lexicographer file name
1603
+ lexname_index = int(_next_token())
1604
+ synset._lexname = self._lexnames[lexname_index]
1605
+
1606
+ # get the part of speech
1607
+ synset._pos = _next_token()
1608
+
1609
+ # create Lemma objects for each lemma
1610
+ n_lemmas = int(_next_token(), 16)
1611
+ for _ in range(n_lemmas):
1612
+ # get the lemma name
1613
+ lemma_name = _next_token()
1614
+ # get the lex_id (used for sense_keys)
1615
+ lex_id = int(_next_token(), 16)
1616
+ # If the lemma has a syntactic marker, extract it.
1617
+ m = re.match(r"(.*?)(\(.*\))?$", lemma_name)
1618
+ lemma_name, syn_mark = m.groups()
1619
+ # create the lemma object
1620
+ lemma = Lemma(self, synset, lemma_name, lexname_index, lex_id, syn_mark)
1621
+ synset._lemmas.append(lemma)
1622
+ synset._lemma_names.append(lemma._name)
1623
+
1624
+ # collect the pointer tuples
1625
+ n_pointers = int(_next_token())
1626
+ for _ in range(n_pointers):
1627
+ symbol = _next_token()
1628
+ offset = int(_next_token())
1629
+ pos = _next_token()
1630
+ lemma_ids_str = _next_token()
1631
+ if lemma_ids_str == "0000":
1632
+ synset._pointers[symbol].add((pos, offset))
1633
+ else:
1634
+ source_index = int(lemma_ids_str[:2], 16) - 1
1635
+ target_index = int(lemma_ids_str[2:], 16) - 1
1636
+ source_lemma_name = synset._lemmas[source_index]._name
1637
+ lemma_pointers = synset._lemma_pointers
1638
+ tups = lemma_pointers[source_lemma_name, symbol]
1639
+ tups.append((pos, offset, target_index))
1640
+
1641
+ # read the verb frames
1642
+ try:
1643
+ frame_count = int(_next_token())
1644
+ except StopIteration:
1645
+ pass
1646
+ else:
1647
+ for _ in range(frame_count):
1648
+ # read the plus sign
1649
+ plus = _next_token()
1650
+ assert plus == "+"
1651
+ # read the frame and lemma number
1652
+ frame_number = int(_next_token())
1653
+ frame_string_fmt = VERB_FRAME_STRINGS[frame_number]
1654
+ lemma_number = int(_next_token(), 16)
1655
+ # lemma number of 00 means all words in the synset
1656
+ if lemma_number == 0:
1657
+ synset._frame_ids.append(frame_number)
1658
+ for lemma in synset._lemmas:
1659
+ lemma._frame_ids.append(frame_number)
1660
+ lemma._frame_strings.append(frame_string_fmt % lemma._name)
1661
+ # only a specific word in the synset
1662
+ else:
1663
+ lemma = synset._lemmas[lemma_number - 1]
1664
+ lemma._frame_ids.append(frame_number)
1665
+ lemma._frame_strings.append(frame_string_fmt % lemma._name)
1666
+
1667
+ # raise a more informative error with line text
1668
+ except ValueError as e:
1669
+ raise WordNetError(f"line {data_file_line!r}: {e}") from e
1670
+
1671
+ # set sense keys for Lemma objects - note that this has to be
1672
+ # done afterwards so that the relations are available
1673
+ for lemma in synset._lemmas:
1674
+ if synset._pos == ADJ_SAT:
1675
+ head_lemma = synset.similar_tos()[0]._lemmas[0]
1676
+ head_name = head_lemma._name
1677
+ head_id = "%02d" % head_lemma._lex_id
1678
+ else:
1679
+ head_name = head_id = ""
1680
+ tup = (
1681
+ lemma._name,
1682
+ WordNetCorpusReader._pos_numbers[synset._pos],
1683
+ lemma._lexname_index,
1684
+ lemma._lex_id,
1685
+ head_name,
1686
+ head_id,
1687
+ )
1688
+ lemma._key = ("%s%%%d:%02d:%02d:%s:%s" % tup).lower()
1689
+
1690
+ # the canonical name is based on the first lemma
1691
+ lemma_name = synset._lemmas[0]._name.lower()
1692
+ offsets = self._lemma_pos_offset_map[lemma_name][synset._pos]
1693
+ sense_index = offsets.index(synset._offset)
1694
+ tup = lemma_name, synset._pos, sense_index + 1
1695
+ synset._name = "%s.%s.%02i" % tup
1696
+
1697
+ return synset
1698
+
1699
+ def synset_from_sense_key(self, sense_key):
1700
+ """
1701
+ Retrieves synset based on a given sense_key. Sense keys can be
1702
+ obtained from lemma.key()
1703
+
1704
+ From https://wordnet.princeton.edu/documentation/senseidx5wn:
1705
+ A sense_key is represented as::
1706
+
1707
+ lemma % lex_sense (e.g. 'dog%1:18:01::')
1708
+
1709
+ where lex_sense is encoded as::
1710
+
1711
+ ss_type:lex_filenum:lex_id:head_word:head_id
1712
+
1713
+ :lemma: ASCII text of word/collocation, in lower case
1714
+ :ss_type: synset type for the sense (1 digit int)
1715
+ The synset type is encoded as follows::
1716
+
1717
+ 1 NOUN
1718
+ 2 VERB
1719
+ 3 ADJECTIVE
1720
+ 4 ADVERB
1721
+ 5 ADJECTIVE SATELLITE
1722
+ :lex_filenum: name of lexicographer file containing the synset for the sense (2 digit int)
1723
+ :lex_id: when paired with lemma, uniquely identifies a sense in the lexicographer file (2 digit int)
1724
+ :head_word: lemma of the first word in satellite's head synset
1725
+ Only used if sense is in an adjective satellite synset
1726
+ :head_id: uniquely identifies sense in a lexicographer file when paired with head_word
1727
+ Only used if head_word is present (2 digit int)
1728
+
1729
+ >>> import nltk
1730
+ >>> from nltk.corpus import wordnet as wn
1731
+ >>> print(wn.synset_from_sense_key("drive%1:04:03::"))
1732
+ Synset('drive.n.06')
1733
+
1734
+ >>> print(wn.synset_from_sense_key("driving%1:04:03::"))
1735
+ Synset('drive.n.06')
1736
+ """
1737
+ return self.lemma_from_key(sense_key).synset()
1738
+
1739
+ #############################################################
1740
+ # Retrieve synsets and lemmas.
1741
+ #############################################################
1742
+
1743
+ def synsets(self, lemma, pos=None, lang="eng", check_exceptions=True):
1744
+ """Load all synsets with a given lemma and part of speech tag.
1745
+ If no pos is specified, all synsets for all parts of speech
1746
+ will be loaded.
1747
+ If lang is specified, all the synsets associated with the lemma name
1748
+ of that language will be returned.
1749
+ """
1750
+ lemma = lemma.lower()
1751
+
1752
+ if lang == "eng":
1753
+ get_synset = self.synset_from_pos_and_offset
1754
+ index = self._lemma_pos_offset_map
1755
+ if pos is None:
1756
+ pos = POS_LIST
1757
+ return [
1758
+ get_synset(p, offset)
1759
+ for p in pos
1760
+ for form in self._morphy(lemma, p, check_exceptions)
1761
+ for offset in index[form].get(p, [])
1762
+ ]
1763
+
1764
+ else:
1765
+ self._load_lang_data(lang)
1766
+ synset_list = []
1767
+ if lemma in self._lang_data[lang][1]:
1768
+ for l in self._lang_data[lang][1][lemma]:
1769
+ if pos is not None and l[-1] != pos:
1770
+ continue
1771
+ synset_list.append(self.of2ss(l))
1772
+ return synset_list
1773
+
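+ # Hedged illustration (not part of the NLTK source); assumes WordNet 3.0.
+ # Restricting the part of speech narrows the result set:
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.synsets('dog', pos=wn.VERB)
+ # [Synset('chase.v.01')]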
1774
+ def lemmas(self, lemma, pos=None, lang="eng"):
1775
+ """Return all Lemma objects with a name matching the specified lemma
1776
+ name and part of speech tag. Matches any part of speech tag if none is
1777
+ specified."""
1778
+
1779
+ lemma = lemma.lower()
1780
+ if lang == "eng":
1781
+ return [
1782
+ lemma_obj
1783
+ for synset in self.synsets(lemma, pos)
1784
+ for lemma_obj in synset.lemmas()
1785
+ if lemma_obj.name().lower() == lemma
1786
+ ]
1787
+
1788
+ else:
1789
+ self._load_lang_data(lang)
1790
+ lemmas = []
1791
+ syn = self.synsets(lemma, lang=lang)
1792
+ for s in syn:
1793
+ if pos is not None and s.pos() != pos:
1794
+ continue
1795
+ for lemma_obj in s.lemmas(lang=lang):
1796
+ if lemma_obj.name().lower() == lemma:
1797
+ lemmas.append(lemma_obj)
1798
+ return lemmas
1799
+
1800
+ def all_lemma_names(self, pos=None, lang="eng"):
1801
+ """Return all lemma names for all synsets for the given
1802
+ part of speech tag and language or languages. If pos is
1803
+ not specified, all synsets for all parts of speech will
1804
+ be used."""
1805
+
1806
+ if lang == "eng":
1807
+ if pos is None:
1808
+ return iter(self._lemma_pos_offset_map)
1809
+ else:
1810
+ return (
1811
+ lemma
1812
+ for lemma in self._lemma_pos_offset_map
1813
+ if pos in self._lemma_pos_offset_map[lemma]
1814
+ )
1815
+ else:
1816
+ self._load_lang_data(lang)
1817
+ lemma = []
1818
+ for i in self._lang_data[lang][0]:
1819
+ if pos is not None and i[-1] != pos:
1820
+ continue
1821
+ lemma.extend(self._lang_data[lang][0][i])
1822
+
1823
+ lemma = iter(set(lemma))
1824
+ return lemma
1825
+
1826
+ def all_omw_synsets(self, pos=None, lang=None):
1827
+ if lang not in self.langs():
1828
+ return None
1829
+ self._load_lang_data(lang)
1830
+ for of in self._lang_data[lang][0]:
1831
+ if not pos or of[-1] == pos:
1832
+ ss = self.of2ss(of)
1833
+ if ss:
1834
+ yield ss
1835
+
1836
+ # else:
1837
+ # A few OMW offsets don't exist in Wordnet 3.0.
1838
+ # warnings.warn(f"Language {lang}: no synset found for {of}")
1839
+
1840
+ def all_synsets(self, pos=None, lang="eng"):
1841
+ """Iterate over all synsets with a given part of speech tag.
1842
+ If no pos is specified, all synsets for all parts of speech
1843
+ will be loaded.
1844
+ """
1845
+ if lang == "eng":
1846
+ return self.all_eng_synsets(pos=pos)
1847
+ else:
1848
+ return self.all_omw_synsets(pos=pos, lang=lang)
1849
+
1850
+ def all_eng_synsets(self, pos=None):
1851
+ if pos is None:
1852
+ pos_tags = self._FILEMAP.keys()
1853
+ else:
1854
+ pos_tags = [pos]
1855
+
1856
+ cache = self._synset_offset_cache
1857
+ from_pos_and_line = self._synset_from_pos_and_line
1858
+
1859
+ # generate all synsets for each part of speech
1860
+ for pos_tag in pos_tags:
1861
+ # Open the file for reading. Note that we can not re-use
1862
+ # the file pointers from self._data_file_map here, because
1863
+ # we're defining an iterator, and those file pointers might
1864
+ # be moved while we're not looking.
1865
+ if pos_tag == ADJ_SAT:
1866
+ pos_file = ADJ
1867
+ else:
1868
+ pos_file = pos_tag
1869
+ fileid = "data.%s" % self._FILEMAP[pos_file]
1870
+ data_file = self.open(fileid)
1871
+
1872
+ try:
1873
+ # generate synsets for each line in the POS file
1874
+ offset = data_file.tell()
1875
+ line = data_file.readline()
1876
+ while line:
1877
+ if not line[0].isspace():
1878
+ if offset in cache[pos_tag]:
1879
+ # See if the synset is cached
1880
+ synset = cache[pos_tag][offset]
1881
+ else:
1882
+ # Otherwise, parse the line
1883
+ synset = from_pos_and_line(pos_tag, line)
1884
+ cache[pos_tag][offset] = synset
1885
+
1886
+ # adjective satellites are in the same file as
1887
+ # adjectives so only yield the synset if it's actually
1888
+ # a satellite
1889
+ if pos_tag == ADJ_SAT and synset._pos == ADJ_SAT:
1890
+ yield synset
1891
+ # for all other POS tags, yield all synsets (this means
1892
+ # that adjectives also include adjective satellites)
1893
+ elif pos_tag != ADJ_SAT:
1894
+ yield synset
1895
+ offset = data_file.tell()
1896
+ line = data_file.readline()
1897
+
1898
+ # close the extra file handle we opened
1899
+ except:
1900
+ data_file.close()
1901
+ raise
1902
+ else:
1903
+ data_file.close()
1904
+
1905
+ def words(self, lang="eng"):
1906
+ """return lemmas of the given language as list of words"""
1907
+ return self.all_lemma_names(lang=lang)
1908
+
1909
+ def synonyms(self, word, lang="eng"):
1910
+ """return nested list with the synonyms of the different senses of word in the given language"""
1911
+ return [
1912
+ sorted(list(set(ss.lemma_names(lang=lang)) - {word}))
1913
+ for ss in self.synsets(word, lang=lang)
1914
+ ]
1915
+
1916
+ def doc(self, file="README", lang="eng"):
1917
+ """Return the contents of readme, license or citation file
1918
+ use lang=lang to get the file for an individual language"""
1919
+ if lang == "eng":
1920
+ reader = self
1921
+ else:
1922
+ reader = self._omw_reader
1923
+ if lang in self.langs():
1924
+ file = f"{os.path.join(self.provenances[lang],file)}"
1925
+ try:
1926
+ with reader.open(file) as fp:
1927
+ return fp.read()
1928
+ except:
1929
+ if lang in self._lang_data:
1930
+ return f"Cannot determine {file} for {lang}"
1931
+ else:
1932
+ return f"Language {lang} is not supported."
1933
+
1934
+ def license(self, lang="eng"):
1935
+ """Return the contents of LICENSE (for omw)
1936
+ use lang=lang to get the license for an individual language"""
1937
+ return self.doc(file="LICENSE", lang=lang)
1938
+
1939
+ def readme(self, lang="eng"):
1940
+ """Return the contents of README (for omw)
1941
+ use lang=lang to get the readme for an individual language"""
1942
+ return self.doc(file="README", lang=lang)
1943
+
1944
+ def citation(self, lang="eng"):
1945
+ """Return the contents of citation.bib file (for omw)
1946
+ use lang=lang to get the citation for an individual language"""
1947
+ return self.doc(file="citation.bib", lang=lang)
1948
+
1949
+ #############################################################
1950
+ # Misc
1951
+ #############################################################
1952
+ def lemma_count(self, lemma):
1953
+ """Return the frequency count for this Lemma"""
1954
+ # Currently, counts only work for English
1955
+ if lemma._lang != "eng":
1956
+ return 0
1957
+ # open the count file if we haven't already
1958
+ if self._key_count_file is None:
1959
+ self._key_count_file = self.open("cntlist.rev")
1960
+ # find the key in the counts file and return the count
1961
+ line = _binary_search_file(self._key_count_file, lemma._key)
1962
+ if line:
1963
+ return int(line.rsplit(" ", 1)[-1])
1964
+ else:
1965
+ return 0
1966
+
1967
+ def path_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
1968
+ return synset1.path_similarity(synset2, verbose, simulate_root)
1969
+
1970
+ path_similarity.__doc__ = Synset.path_similarity.__doc__
1971
+
1972
+ def lch_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
1973
+ return synset1.lch_similarity(synset2, verbose, simulate_root)
1974
+
1975
+ lch_similarity.__doc__ = Synset.lch_similarity.__doc__
1976
+
1977
+ def wup_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
1978
+ return synset1.wup_similarity(synset2, verbose, simulate_root)
1979
+
1980
+ wup_similarity.__doc__ = Synset.wup_similarity.__doc__
1981
+
1982
+ def res_similarity(self, synset1, synset2, ic, verbose=False):
1983
+ return synset1.res_similarity(synset2, ic, verbose)
1984
+
1985
+ res_similarity.__doc__ = Synset.res_similarity.__doc__
1986
+
1987
+ def jcn_similarity(self, synset1, synset2, ic, verbose=False):
1988
+ return synset1.jcn_similarity(synset2, ic, verbose)
1989
+
1990
+ jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
1991
+
1992
+ def lin_similarity(self, synset1, synset2, ic, verbose=False):
1993
+ return synset1.lin_similarity(synset2, ic, verbose)
1994
+
1995
+ lin_similarity.__doc__ = Synset.lin_similarity.__doc__
1996
+
1997
+ #############################################################
1998
+ # Morphy
1999
+ #############################################################
2000
+ # Morphy, adapted from Oliver Steele's pywordnet
2001
+ def morphy(self, form, pos=None, check_exceptions=True):
2002
+ """
2003
+ Find a possible base form for the given form, with the given
2004
+ part of speech, by checking WordNet's list of exceptional
2005
+ forms, and by recursively stripping affixes for this part of
2006
+ speech until a form in WordNet is found.
2007
+
2008
+ >>> from nltk.corpus import wordnet as wn
2009
+ >>> print(wn.morphy('dogs'))
2010
+ dog
2011
+ >>> print(wn.morphy('churches'))
2012
+ church
2013
+ >>> print(wn.morphy('aardwolves'))
2014
+ aardwolf
2015
+ >>> print(wn.morphy('abaci'))
2016
+ abacus
2017
+ >>> wn.morphy('hardrock', wn.ADV)
2018
+ >>> print(wn.morphy('book', wn.NOUN))
2019
+ book
2020
+ >>> wn.morphy('book', wn.ADJ)
2021
+ """
2022
+
2023
+ if pos is None:
2024
+ morphy = self._morphy
2025
+ analyses = chain(a for p in POS_LIST for a in morphy(form, p))
2026
+ else:
2027
+ analyses = self._morphy(form, pos, check_exceptions)
2028
+
2029
+ # get the first one we find
2030
+ first = list(islice(analyses, 1))
2031
+ if len(first) == 1:
2032
+ return first[0]
2033
+ else:
2034
+ return None
2035
+
2036
+ MORPHOLOGICAL_SUBSTITUTIONS = {
2037
+ NOUN: [
2038
+ ("s", ""),
2039
+ ("ses", "s"),
2040
+ ("ves", "f"),
2041
+ ("xes", "x"),
2042
+ ("zes", "z"),
2043
+ ("ches", "ch"),
2044
+ ("shes", "sh"),
2045
+ ("men", "man"),
2046
+ ("ies", "y"),
2047
+ ],
2048
+ VERB: [
2049
+ ("s", ""),
2050
+ ("ies", "y"),
2051
+ ("es", "e"),
2052
+ ("es", ""),
2053
+ ("ed", "e"),
2054
+ ("ed", ""),
2055
+ ("ing", "e"),
2056
+ ("ing", ""),
2057
+ ],
2058
+ ADJ: [("er", ""), ("est", ""), ("er", "e"), ("est", "e")],
2059
+ ADV: [],
2060
+ }
2061
+
2062
+ MORPHOLOGICAL_SUBSTITUTIONS[ADJ_SAT] = MORPHOLOGICAL_SUBSTITUTIONS[ADJ]
2063
+
2064
+ def _morphy(self, form, pos, check_exceptions=True):
2065
+ # from jordanbg:
2066
+ # Given an original string x
2067
+ # 1. Apply rules once to the input to get y1, y2, y3, etc.
2068
+ # 2. Return all that are in the database
2069
+ # 3. If there are no matches, keep applying rules until you either
2070
+ # find a match or you can't go any further
2071
+
2072
+ exceptions = self._exception_map[pos]
2073
+ substitutions = self.MORPHOLOGICAL_SUBSTITUTIONS[pos]
2074
+
2075
+ def apply_rules(forms):
2076
+ return [
2077
+ form[: -len(old)] + new
2078
+ for form in forms
2079
+ for old, new in substitutions
2080
+ if form.endswith(old)
2081
+ ]
2082
+
2083
+ def filter_forms(forms):
2084
+ result = []
2085
+ seen = set()
2086
+ for form in forms:
2087
+ if form in self._lemma_pos_offset_map:
2088
+ if pos in self._lemma_pos_offset_map[form]:
2089
+ if form not in seen:
2090
+ result.append(form)
2091
+ seen.add(form)
2092
+ return result
2093
+
2094
+ # 0. Check the exception lists
2095
+ if check_exceptions:
2096
+ if form in exceptions:
2097
+ return filter_forms([form] + exceptions[form])
2098
+
2099
+ # 1. Apply rules once to the input to get y1, y2, y3, etc.
2100
+ forms = apply_rules([form])
2101
+
2102
+ # 2. Return all that are in the database (and check the original too)
2103
+ results = filter_forms([form] + forms)
2104
+ if results:
2105
+ return results
2106
+
2107
+ # 3. If there are no matches, keep applying rules until we find a match
2108
+ while forms:
2109
+ forms = apply_rules(forms)
2110
+ results = filter_forms(forms)
2111
+ if results:
2112
+ return results
2113
+
2114
+ # Return an empty list if we can't find anything
2115
+ return []
2116
+
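# A worked trace of the rules above (a sketch; assumes the WordNet data is
# installed). For the noun "churches", step 1 applies ("s", "") and
# ("ches", "ch") to give ["churche", "church"]; step 2 keeps only the forms
# present in the lemma index:
#
#     >>> from nltk.corpus import wordnet as wn
#     >>> wn._morphy('churches', wn.NOUN)
#     ['church']
#     >>> wn._morphy('dogs', wn.NOUN)
#     ['dog']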
2117
+ #############################################################
2118
+ # Create information content from corpus
2119
+ #############################################################
2120
+ def ic(self, corpus, weight_senses_equally=False, smoothing=1.0):
2121
+ """
2122
+ Creates an information content lookup dictionary from a corpus.
2123
+
2124
+ :type corpus: CorpusReader
2125
+ :param corpus: The corpus from which we create an information
2126
+ content dictionary.
2127
+ :type weight_senses_equally: bool
2128
+ :param weight_senses_equally: If this is True, gives all
2129
+ possible senses equal weight rather than dividing by the
2130
+ number of possible senses. (If a word has 3 synsets, each
2131
+ sense gets 0.3333 per appearance when this is False, 1.0 when
2132
+ it is True.)
2133
+ :param smoothing: How much do we smooth synset counts (default is 1.0)
2134
+ :type smoothing: float
2135
+ :return: An information content dictionary
2136
+ """
2137
+ counts = FreqDist()
2138
+ for ww in corpus.words():
2139
+ counts[ww] += 1
2140
+
2141
+ ic = {}
2142
+ for pp in POS_LIST:
2143
+ ic[pp] = defaultdict(float)
2144
+
2145
+ # Initialize the counts with the smoothing value
2146
+ if smoothing > 0.0:
2147
+ for pp in POS_LIST:
2148
+ ic[pp][0] = smoothing
2149
+ for ss in self.all_synsets():
2150
+ pos = ss._pos
2151
+ if pos == ADJ_SAT:
2152
+ pos = ADJ
2153
+ ic[pos][ss._offset] = smoothing
2154
+
2155
+ for ww in counts:
2156
+ possible_synsets = self.synsets(ww)
2157
+ if len(possible_synsets) == 0:
2158
+ continue
2159
+
2160
+ # Distribute weight among possible synsets
2161
+ weight = float(counts[ww])
2162
+ if not weight_senses_equally:
2163
+ weight /= float(len(possible_synsets))
2164
+
2165
+ for ss in possible_synsets:
2166
+ pos = ss._pos
2167
+ if pos == ADJ_SAT:
2168
+ pos = ADJ
2169
+ for level in ss._iter_hypernym_lists():
2170
+ for hh in level:
2171
+ ic[pos][hh._offset] += weight
2172
+ # Add the weight to the root
2173
+ ic[pos][0] += weight
2174
+ return ic
2175
+
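# A minimal sketch of building an IC dictionary from an NLTK corpus and
# passing it to the IC-based similarity methods defined above (assumes the
# wordnet and genesis corpora are installed; no smoothing is applied here):
#
#     >>> from nltk.corpus import wordnet as wn, genesis
#     >>> genesis_ic = wn.ic(genesis, False, 0.0)
#     >>> wn.res_similarity(wn.synset('dog.n.01'), wn.synset('cat.n.01'), genesis_ic)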
2176
+ def custom_lemmas(self, tab_file, lang):
2177
+ """
2178
+ Reads a custom tab file containing mappings of lemmas in the given
2179
+ language to Princeton WordNet 3.0 synset offsets, allowing NLTK's
2180
+ WordNet functions to then be used with that language.
2181
+
2182
+ See the "Tab files" section at https://omwn.org/omw1.html for
2183
+ documentation on the Multilingual WordNet tab file format.
2184
+
2185
+ :param tab_file: Tab file as a file or file-like object
2186
+ :type lang: str
2187
+ :param lang: ISO 639-3 code of the language of the tab file
2188
+ """
2189
+ lg = lang.split("_")[0]
2190
+ if len(lg) != 3:
2191
+ raise ValueError("lang should be a (3 character) ISO 639-3 code")
2192
+ self._lang_data[lang] = [
2193
+ defaultdict(list),
2194
+ defaultdict(list),
2195
+ defaultdict(list),
2196
+ defaultdict(list),
2197
+ ]
2198
+ for line in tab_file.readlines():
2199
+ if isinstance(line, bytes):
2200
+ # Support byte-stream files (e.g. as returned by Python 2's
2201
+ # open() function) as well as text-stream ones
2202
+ line = line.decode("utf-8")
2203
+ if not line.startswith("#"):
2204
+ triple = line.strip().split("\t")
2205
+ if len(triple) < 3:
2206
+ continue
2207
+ offset_pos, label = triple[:2]
2208
+ val = triple[-1]
2209
+ if self.map30:
2210
+ if offset_pos in self.map30:
2211
+ # Map offset_pos to current Wordnet version:
2212
+ offset_pos = self.map30[offset_pos]
2213
+ else:
2214
+ # Some OMW offsets were never in Wordnet:
2215
+ if (
2216
+ offset_pos not in self.nomap
2217
+ and offset_pos.replace("a", "s") not in self.nomap
2218
+ ):
2219
+ warnings.warn(
2220
+ f"{lang}: invalid offset {offset_pos} in '{line}'"
2221
+ )
2222
+ continue
2223
+ elif offset_pos[-1] == "a":
2224
+ wnss = self.of2ss(offset_pos)
2225
+ if wnss and wnss.pos() == "s": # Wordnet pos is "s"
2226
+ # Label OMW adjective satellites back to their Wordnet pos ("s")
2227
+ offset_pos = self.ss2of(wnss)
2228
+ pair = label.split(":")
2229
+ attr = pair[-1]
2230
+ if len(pair) == 1 or pair[0] == lg:
2231
+ if attr == "lemma":
2232
+ val = val.strip().replace(" ", "_")
2233
+ self._lang_data[lang][1][val.lower()].append(offset_pos)
2234
+ if attr in self.lg_attrs:
2235
+ self._lang_data[lang][self.lg_attrs.index(attr)][
2236
+ offset_pos
2237
+ ].append(val)
2238
+
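# A hedged sketch of custom_lemmas with made-up tab-file content; the
# language code "qcn", the offset and the lemma below are illustrative only
# (real files follow the OMW tab format linked in the docstring above):
#
#     >>> from io import StringIO
#     >>> tab = StringIO("# comment line\n02084071-n\tqcn:lemma\tchien\n")
#     >>> wn.custom_lemmas(tab, lang="qcn")
#     >>> wn.synset('dog.n.01').lemma_names(lang='qcn')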
2239
+ def disable_custom_lemmas(self, lang):
2240
+ """prevent synsets from being mistakenly added"""
2241
+ for n in range(len(self.lg_attrs)):
2242
+ self._lang_data[lang][n].default_factory = None
2243
+
2244
+ ######################################################################
2245
+ # Visualize WordNet relation graphs using Graphviz
2246
+ ######################################################################
2247
+
2248
+ def digraph(
2249
+ self,
2250
+ inputs,
2251
+ rel=lambda s: s.hypernyms(),
2252
+ pos=None,
2253
+ maxdepth=-1,
2254
+ shapes=None,
2255
+ attr=None,
2256
+ verbose=False,
2257
+ ):
2258
+ """
2259
+ Produce a graphical representation from 'inputs' (a list of
2260
+ start nodes, which can be a mix of Synsets, Lemmas and/or words),
2261
+ and a synset relation, for drawing with the 'dot' graph visualisation
2262
+ program from the Graphviz package.
2263
+
2264
+ Return a string in the DOT graph file language, which can then be
2265
+ converted to an image by nltk.parse.dependencygraph.dot2img(dot_string).
2266
+
2267
+ Optional Parameters:
2268
+ :rel: Wordnet synset relation
2269
+ :pos: for words, restricts Part of Speech to 'n', 'v', 'a' or 'r'
2270
+ :maxdepth: limit the longest path
2271
+ :shapes: dictionary of strings that trigger a specified shape
2272
+ :attr: dictionary with global graph attributes
2273
+ :verbose: warn about cycles
2274
+
2275
+ >>> from nltk.corpus import wordnet as wn
2276
+ >>> print(wn.digraph([wn.synset('dog.n.01')]))
2277
+ digraph G {
2278
+ "Synset('animal.n.01')" -> "Synset('organism.n.01')";
2279
+ "Synset('canine.n.02')" -> "Synset('carnivore.n.01')";
2280
+ "Synset('carnivore.n.01')" -> "Synset('placental.n.01')";
2281
+ "Synset('chordate.n.01')" -> "Synset('animal.n.01')";
2282
+ "Synset('dog.n.01')" -> "Synset('canine.n.02')";
2283
+ "Synset('dog.n.01')" -> "Synset('domestic_animal.n.01')";
2284
+ "Synset('domestic_animal.n.01')" -> "Synset('animal.n.01')";
2285
+ "Synset('living_thing.n.01')" -> "Synset('whole.n.02')";
2286
+ "Synset('mammal.n.01')" -> "Synset('vertebrate.n.01')";
2287
+ "Synset('object.n.01')" -> "Synset('physical_entity.n.01')";
2288
+ "Synset('organism.n.01')" -> "Synset('living_thing.n.01')";
2289
+ "Synset('physical_entity.n.01')" -> "Synset('entity.n.01')";
2290
+ "Synset('placental.n.01')" -> "Synset('mammal.n.01')";
2291
+ "Synset('vertebrate.n.01')" -> "Synset('chordate.n.01')";
2292
+ "Synset('whole.n.02')" -> "Synset('object.n.01')";
2293
+ }
2294
+ <BLANKLINE>
2295
+ """
2296
+ from nltk.util import edge_closure, edges2dot
2297
+
2298
+ synsets = set()
2299
+ edges = set()
2300
+ if not shapes:
2301
+ shapes = dict()
2302
+ if not attr:
2303
+ attr = dict()
2304
+
2305
+ def add_lemma(lem):
2306
+ ss = lem.synset()
2307
+ synsets.add(ss)
2308
+ edges.add((lem, ss))
2309
+
2310
+ for node in inputs:
2311
+ typ = type(node)
2312
+ if typ == Synset:
2313
+ synsets.add(node)
2314
+ elif typ == Lemma:
2315
+ add_lemma(node)
2316
+ elif typ == str:
2317
+ for lemma in self.lemmas(node, pos):
2318
+ add_lemma(lemma)
2319
+
2320
+ for ss in synsets:
2321
+ edges = edges.union(edge_closure(ss, rel, maxdepth, verbose))
2322
+ dot_string = edges2dot(sorted(list(edges)), shapes=shapes, attr=attr)
2323
+ return dot_string
2324
+
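# The DOT string returned above can be rendered with Graphviz, e.g.
# (a sketch; requires the Graphviz 'dot' binary to be installed):
#
#     >>> from nltk.parse.dependencygraph import dot2img
#     >>> dot = wn.digraph([wn.synset('dog.n.01')])
#     >>> img = dot2img(dot)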
2325
+
2326
+ ######################################################################
2327
+ # WordNet Information Content Corpus Reader
2328
+ ######################################################################
2329
+
2330
+
2331
+ class WordNetICCorpusReader(CorpusReader):
2332
+ """
2333
+ A corpus reader for the WordNet information content corpus.
2334
+ """
2335
+
2336
+ def __init__(self, root, fileids):
2337
+ CorpusReader.__init__(self, root, fileids, encoding="utf8")
2338
+
2339
+ # this load function would be more efficient if the data was pickled
2340
+ # Note that we can't use NLTK's frequency distributions because
2341
+ # synsets are overlapping (each instance of a synset also counts
2342
+ # as an instance of its hypernyms)
2343
+ def ic(self, icfile):
2344
+ """
2345
+ Load an information content file from the wordnet_ic corpus
2346
+ and return a dictionary. This dictionary has just two keys,
2347
+ NOUN and VERB, whose values are dictionaries that map from
2348
+ synsets to information content values.
2349
+
2350
+ :type icfile: str
2351
+ :param icfile: The name of the wordnet_ic file (e.g. "ic-brown.dat")
2352
+ :return: An information content dictionary
2353
+ """
2354
+ ic = {}
2355
+ ic[NOUN] = defaultdict(float)
2356
+ ic[VERB] = defaultdict(float)
2357
+ with self.open(icfile) as fp:
2358
+ for num, line in enumerate(fp):
2359
+ if num == 0: # skip the header
2360
+ continue
2361
+ fields = line.split()
2362
+ offset = int(fields[0][:-1])
2363
+ value = float(fields[1])
2364
+ pos = _get_pos(fields[0])
2365
+ if len(fields) == 3 and fields[2] == "ROOT":
2366
+ # Store root count.
2367
+ ic[pos][0] += value
2368
+ if value != 0:
2369
+ ic[pos][offset] = value
2370
+ return ic
2371
+
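# A minimal usage sketch (assumes the wordnet_ic data package is installed);
# the returned dictionary can be passed straight to the IC-based similarity
# functions:
#
#     >>> from nltk.corpus import wordnet as wn, wordnet_ic
#     >>> brown_ic = wordnet_ic.ic('ic-brown.dat')
#     >>> wn.res_similarity(wn.synset('dog.n.01'), wn.synset('cat.n.01'), brown_ic)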
2372
+
2373
+ ######################################################################
2374
+ # Similarity metrics
2375
+ ######################################################################
2376
+
2377
+ # TODO: Add in the option to manually add a new root node; this will be
2378
+ # useful for verb similarity as there exist multiple verb taxonomies.
2379
+
2380
+ # More information about the metrics is available at
2381
+ # http://marimba.d.umn.edu/similarity/measures.html
2382
+
2383
+
2384
+ def path_similarity(synset1, synset2, verbose=False, simulate_root=True):
2385
+ return synset1.path_similarity(
2386
+ synset2, verbose=verbose, simulate_root=simulate_root
2387
+ )
2388
+
2389
+
2390
+ def lch_similarity(synset1, synset2, verbose=False, simulate_root=True):
2391
+ return synset1.lch_similarity(synset2, verbose=verbose, simulate_root=simulate_root)
2392
+
2393
+
2394
+ def wup_similarity(synset1, synset2, verbose=False, simulate_root=True):
2395
+ return synset1.wup_similarity(synset2, verbose=verbose, simulate_root=simulate_root)
2396
+
2397
+
2398
+ def res_similarity(synset1, synset2, ic, verbose=False):
2399
+ return synset1.res_similarity(synset2, ic, verbose=verbose)
2400
+
2401
+
2402
+ def jcn_similarity(synset1, synset2, ic, verbose=False):
2403
+ return synset1.jcn_similarity(synset2, ic, verbose=verbose)
2404
+
2405
+
2406
+ def lin_similarity(synset1, synset2, ic, verbose=False):
2407
+ return synset1.lin_similarity(synset2, ic, verbose=verbose)
2408
+
2409
+
2410
+ path_similarity.__doc__ = Synset.path_similarity.__doc__
2411
+ lch_similarity.__doc__ = Synset.lch_similarity.__doc__
2412
+ wup_similarity.__doc__ = Synset.wup_similarity.__doc__
2413
+ res_similarity.__doc__ = Synset.res_similarity.__doc__
2414
+ jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
2415
+ lin_similarity.__doc__ = Synset.lin_similarity.__doc__
2416
+
2417
+
2418
+ def _lcs_ic(synset1, synset2, ic, verbose=False):
2419
+ """
2420
+ Get the information content of the least common subsumer that has
2421
+ the highest information content value. If two nodes have no
2422
+ explicit common subsumer, assume that they share an artificial
2423
+ root node that is the hypernym of all explicit roots.
2424
+
2425
+ :type synset1: Synset
2426
+ :param synset1: First input synset.
2427
+ :type synset2: Synset
2428
+ :param synset2: Second input synset. Must be the same part of
2429
+ speech as the first synset.
2430
+ :type ic: dict
2431
+ :param ic: an information content object (as returned by ``load_ic()``).
2432
+ :return: The information content of the two synsets and their most
2433
+ informative subsumer
2434
+ """
2435
+ if synset1._pos != synset2._pos:
2436
+ raise WordNetError(
2437
+ "Computing the least common subsumer requires "
2438
+ "%s and %s to have the same part of speech." % (synset1, synset2)
2439
+ )
2440
+
2441
+ ic1 = information_content(synset1, ic)
2442
+ ic2 = information_content(synset2, ic)
2443
+ subsumers = synset1.common_hypernyms(synset2)
2444
+ if len(subsumers) == 0:
2445
+ subsumer_ic = 0
2446
+ else:
2447
+ subsumer_ic = max(information_content(s, ic) for s in subsumers)
2448
+
2449
+ if verbose:
2450
+ print("> LCS Subsumer by content:", subsumer_ic)
2451
+
2452
+ return ic1, ic2, subsumer_ic
2453
+
2454
+
2455
+ # Utility functions
2456
+
2457
+
2458
+ def information_content(synset, ic):
2459
+ pos = synset._pos
2460
+ if pos == ADJ_SAT:
2461
+ pos = ADJ
2462
+ try:
2463
+ icpos = ic[pos]
2464
+ except KeyError as e:
2465
+ msg = "Information content file has no entries for part-of-speech: %s"
2466
+ raise WordNetError(msg % pos) from e
2467
+
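# icpos[0] holds the accumulated count at the (possibly artificial) root,
# so the value computed below is IC(s) = -log P(s) with
# P(s) = count(s) / count(root).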
2468
+ counts = icpos[synset._offset]
2469
+ if counts == 0:
2470
+ return _INF
2471
+ else:
2472
+ return -math.log(counts / icpos[0])
2473
+
2474
+
2475
+ # get the part of speech (NOUN or VERB) from the information content record
2476
+ # (each identifier has a 'n' or 'v' suffix)
2477
+
2478
+
2479
+ def _get_pos(field):
2480
+ if field[-1] == "n":
2481
+ return NOUN
2482
+ elif field[-1] == "v":
2483
+ return VERB
2484
+ else:
2485
+ msg = (
2486
+ "Unidentified part of speech in WordNet Information Content file "
2487
+ "for field %s" % field
2488
+ )
2489
+ raise ValueError(msg)
lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py ADDED
@@ -0,0 +1,256 @@
1
+ # Natural Language Toolkit: York-Toronto-Helsinki Parsed Corpus of Old English Prose (YCOE)
2
+ #
3
+ # Copyright (C) 2001-2015 NLTK Project
4
+ # Author: Selina Dennis <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old
10
+ English Prose (YCOE), a 1.5 million word syntactically-annotated
11
+ corpus of Old English prose texts. The corpus is distributed by the
12
+ Oxford Text Archive: http://www.ota.ahds.ac.uk/ It is not included
13
+ with NLTK.
14
+
15
+ The YCOE corpus is divided into 100 files, each representing
16
+ an Old English prose text. Tags used within each text complies
17
+ to the YCOE standard: https://www-users.york.ac.uk/~lang22/YCOE/YcoeHome.htm
18
+ """
19
+
20
+ import os
21
+ import re
22
+
23
+ from nltk.corpus.reader.api import *
24
+ from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
25
+ from nltk.corpus.reader.tagged import TaggedCorpusReader
26
+ from nltk.corpus.reader.util import *
27
+ from nltk.tokenize import RegexpTokenizer
28
+
29
+
30
+ class YCOECorpusReader(CorpusReader):
31
+ """
32
+ Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old
33
+ English Prose (YCOE), a 1.5 million word syntactically-annotated
34
+ corpus of Old English prose texts.
35
+ """
36
+
37
+ def __init__(self, root, encoding="utf8"):
38
+ CorpusReader.__init__(self, root, [], encoding)
39
+
40
+ self._psd_reader = YCOEParseCorpusReader(
41
+ self.root.join("psd"), ".*", ".psd", encoding=encoding
42
+ )
43
+ self._pos_reader = YCOETaggedCorpusReader(self.root.join("pos"), ".*", ".pos")
44
+
45
+ # Make sure we have a consistent set of items:
46
+ documents = {f[:-4] for f in self._psd_reader.fileids()}
47
+ if {f[:-4] for f in self._pos_reader.fileids()} != documents:
48
+ raise ValueError('Items in "psd" and "pos" ' "subdirectories do not match.")
49
+
50
+ fileids = sorted(
51
+ ["%s.psd" % doc for doc in documents]
52
+ + ["%s.pos" % doc for doc in documents]
53
+ )
54
+ CorpusReader.__init__(self, root, fileids, encoding)
55
+ self._documents = sorted(documents)
56
+
57
+ def documents(self, fileids=None):
58
+ """
59
+ Return a list of document identifiers for all documents in
60
+ this corpus, or for the documents with the given file(s) if
61
+ specified.
62
+ """
63
+ if fileids is None:
64
+ return self._documents
65
+ if isinstance(fileids, str):
66
+ fileids = [fileids]
67
+ for f in fileids:
68
+ if f not in self._fileids:
69
+ raise KeyError("File id %s not found" % fileids)
70
+ # Strip off the '.pos' and '.psd' extensions.
71
+ return sorted({f[:-4] for f in fileids})
72
+
73
+ def fileids(self, documents=None):
74
+ """
75
+ Return a list of file identifiers for the files that make up
76
+ this corpus, or that store the given document(s) if specified.
77
+ """
78
+ if documents is None:
79
+ return self._fileids
80
+ elif isinstance(documents, str):
81
+ documents = [documents]
82
+ return sorted(
83
+ set(
84
+ ["%s.pos" % doc for doc in documents]
85
+ + ["%s.psd" % doc for doc in documents]
86
+ )
87
+ )
88
+
89
+ def _getfileids(self, documents, subcorpus):
90
+ """
91
+ Helper that selects the appropriate fileids for a given set of
92
+ documents from a given subcorpus (pos or psd).
93
+ """
94
+ if documents is None:
95
+ documents = self._documents
96
+ else:
97
+ if isinstance(documents, str):
98
+ documents = [documents]
99
+ for document in documents:
100
+ if document not in self._documents:
101
+ if document[-4:] in (".pos", ".psd"):
102
+ raise ValueError(
103
+ "Expected a document identifier, not a file "
104
+ "identifier. (Use corpus.documents() to get "
105
+ "a list of document identifiers."
106
+ )
107
+ else:
108
+ raise ValueError("Document identifier %s not found" % document)
109
+ return [f"{d}.{subcorpus}" for d in documents]
110
+
111
+ # Delegate to one of our two sub-readers:
112
+ def words(self, documents=None):
113
+ return self._pos_reader.words(self._getfileids(documents, "pos"))
114
+
115
+ def sents(self, documents=None):
116
+ return self._pos_reader.sents(self._getfileids(documents, "pos"))
117
+
118
+ def paras(self, documents=None):
119
+ return self._pos_reader.paras(self._getfileids(documents, "pos"))
120
+
121
+ def tagged_words(self, documents=None):
122
+ return self._pos_reader.tagged_words(self._getfileids(documents, "pos"))
123
+
124
+ def tagged_sents(self, documents=None):
125
+ return self._pos_reader.tagged_sents(self._getfileids(documents, "pos"))
126
+
127
+ def tagged_paras(self, documents=None):
128
+ return self._pos_reader.tagged_paras(self._getfileids(documents, "pos"))
129
+
130
+ def parsed_sents(self, documents=None):
131
+ return self._psd_reader.parsed_sents(self._getfileids(documents, "psd"))
132
+
133
+
134
+ class YCOEParseCorpusReader(BracketParseCorpusReader):
135
+ """Specialized version of the standard bracket parse corpus reader
136
+ that strips out (CODE ...) and (ID ...) nodes."""
137
+
138
+ def _parse(self, t):
139
+ t = re.sub(r"(?u)\((CODE|ID)[^\)]*\)", "", t)
140
+ if re.match(r"\s*\(\s*\)\s*$", t):
141
+ return None
142
+ return BracketParseCorpusReader._parse(self, t)
143
+
144
+
145
+ class YCOETaggedCorpusReader(TaggedCorpusReader):
146
+ def __init__(self, root, items, encoding="utf8"):
147
+ gaps_re = r"(?u)(?<=/\.)\s+|\s*\S*_CODE\s*|\s*\S*_ID\s*"
148
+ sent_tokenizer = RegexpTokenizer(gaps_re, gaps=True)
149
+ TaggedCorpusReader.__init__(
150
+ self, root, items, sep="_", sent_tokenizer=sent_tokenizer
151
+ )
152
+
153
+
154
+ #: A list of all documents and their titles in ycoe.
155
+ documents = {
156
+ "coadrian.o34": "Adrian and Ritheus",
157
+ "coaelhom.o3": "Ælfric, Supplemental Homilies",
158
+ "coaelive.o3": "Ælfric's Lives of Saints",
159
+ "coalcuin": "Alcuin De virtutibus et vitiis",
160
+ "coalex.o23": "Alexander's Letter to Aristotle",
161
+ "coapollo.o3": "Apollonius of Tyre",
162
+ "coaugust": "Augustine",
163
+ "cobede.o2": "Bede's History of the English Church",
164
+ "cobenrul.o3": "Benedictine Rule",
165
+ "coblick.o23": "Blickling Homilies",
166
+ "coboeth.o2": "Boethius' Consolation of Philosophy",
167
+ "cobyrhtf.o3": "Byrhtferth's Manual",
168
+ "cocanedgD": "Canons of Edgar (D)",
169
+ "cocanedgX": "Canons of Edgar (X)",
170
+ "cocathom1.o3": "Ælfric's Catholic Homilies I",
171
+ "cocathom2.o3": "Ælfric's Catholic Homilies II",
172
+ "cochad.o24": "Saint Chad",
173
+ "cochdrul": "Chrodegang of Metz, Rule",
174
+ "cochristoph": "Saint Christopher",
175
+ "cochronA.o23": "Anglo-Saxon Chronicle A",
176
+ "cochronC": "Anglo-Saxon Chronicle C",
177
+ "cochronD": "Anglo-Saxon Chronicle D",
178
+ "cochronE.o34": "Anglo-Saxon Chronicle E",
179
+ "cocura.o2": "Cura Pastoralis",
180
+ "cocuraC": "Cura Pastoralis (Cotton)",
181
+ "codicts.o34": "Dicts of Cato",
182
+ "codocu1.o1": "Documents 1 (O1)",
183
+ "codocu2.o12": "Documents 2 (O1/O2)",
184
+ "codocu2.o2": "Documents 2 (O2)",
185
+ "codocu3.o23": "Documents 3 (O2/O3)",
186
+ "codocu3.o3": "Documents 3 (O3)",
187
+ "codocu4.o24": "Documents 4 (O2/O4)",
188
+ "coeluc1": "Honorius of Autun, Elucidarium 1",
189
+ "coeluc2": "Honorius of Autun, Elucidarium 1",
190
+ "coepigen.o3": "Ælfric's Epilogue to Genesis",
191
+ "coeuphr": "Saint Euphrosyne",
192
+ "coeust": "Saint Eustace and his companions",
193
+ "coexodusP": "Exodus (P)",
194
+ "cogenesiC": "Genesis (C)",
195
+ "cogregdC.o24": "Gregory's Dialogues (C)",
196
+ "cogregdH.o23": "Gregory's Dialogues (H)",
197
+ "coherbar": "Pseudo-Apuleius, Herbarium",
198
+ "coinspolD.o34": "Wulfstan's Institute of Polity (D)",
199
+ "coinspolX": "Wulfstan's Institute of Polity (X)",
200
+ "cojames": "Saint James",
201
+ "colacnu.o23": "Lacnunga",
202
+ "colaece.o2": "Leechdoms",
203
+ "colaw1cn.o3": "Laws, Cnut I",
204
+ "colaw2cn.o3": "Laws, Cnut II",
205
+ "colaw5atr.o3": "Laws, Æthelred V",
206
+ "colaw6atr.o3": "Laws, Æthelred VI",
207
+ "colawaf.o2": "Laws, Alfred",
208
+ "colawafint.o2": "Alfred's Introduction to Laws",
209
+ "colawger.o34": "Laws, Gerefa",
210
+ "colawine.ox2": "Laws, Ine",
211
+ "colawnorthu.o3": "Northumbra Preosta Lagu",
212
+ "colawwllad.o4": "Laws, William I, Lad",
213
+ "coleofri.o4": "Leofric",
214
+ "colsigef.o3": "Ælfric's Letter to Sigefyrth",
215
+ "colsigewB": "Ælfric's Letter to Sigeweard (B)",
216
+ "colsigewZ.o34": "Ælfric's Letter to Sigeweard (Z)",
217
+ "colwgeat": "Ælfric's Letter to Wulfgeat",
218
+ "colwsigeT": "Ælfric's Letter to Wulfsige (T)",
219
+ "colwsigeXa.o34": "Ælfric's Letter to Wulfsige (Xa)",
220
+ "colwstan1.o3": "Ælfric's Letter to Wulfstan I",
221
+ "colwstan2.o3": "Ælfric's Letter to Wulfstan II",
222
+ "comargaC.o34": "Saint Margaret (C)",
223
+ "comargaT": "Saint Margaret (T)",
224
+ "comart1": "Martyrology, I",
225
+ "comart2": "Martyrology, II",
226
+ "comart3.o23": "Martyrology, III",
227
+ "comarvel.o23": "Marvels of the East",
228
+ "comary": "Mary of Egypt",
229
+ "coneot": "Saint Neot",
230
+ "conicodA": "Gospel of Nicodemus (A)",
231
+ "conicodC": "Gospel of Nicodemus (C)",
232
+ "conicodD": "Gospel of Nicodemus (D)",
233
+ "conicodE": "Gospel of Nicodemus (E)",
234
+ "coorosiu.o2": "Orosius",
235
+ "cootest.o3": "Heptateuch",
236
+ "coprefcath1.o3": "Ælfric's Preface to Catholic Homilies I",
237
+ "coprefcath2.o3": "Ælfric's Preface to Catholic Homilies II",
238
+ "coprefcura.o2": "Preface to the Cura Pastoralis",
239
+ "coprefgen.o3": "Ælfric's Preface to Genesis",
240
+ "copreflives.o3": "Ælfric's Preface to Lives of Saints",
241
+ "coprefsolilo": "Preface to Augustine's Soliloquies",
242
+ "coquadru.o23": "Pseudo-Apuleius, Medicina de quadrupedibus",
243
+ "corood": "History of the Holy Rood-Tree",
244
+ "cosevensl": "Seven Sleepers",
245
+ "cosolilo": "St. Augustine's Soliloquies",
246
+ "cosolsat1.o4": "Solomon and Saturn I",
247
+ "cosolsat2": "Solomon and Saturn II",
248
+ "cotempo.o3": "Ælfric's De Temporibus Anni",
249
+ "coverhom": "Vercelli Homilies",
250
+ "coverhomE": "Vercelli Homilies (E)",
251
+ "coverhomL": "Vercelli Homilies (L)",
252
+ "covinceB": "Saint Vincent (Bodley 343)",
253
+ "covinsal": "Vindicta Salvatoris",
254
+ "cowsgosp.o3": "West-Saxon Gospels",
255
+ "cowulf.o34": "Wulfstan's Homilies",
256
+ }
lib/python3.10/site-packages/nltk/corpus/util.py ADDED
@@ -0,0 +1,154 @@
1
+ # Natural Language Toolkit: Corpus Reader Utility Functions
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ ######################################################################
9
+ # { Lazy Corpus Loader
10
+ ######################################################################
11
+
12
+ import gc
13
+ import re
14
+
15
+ import nltk
16
+
17
+ TRY_ZIPFILE_FIRST = False
18
+
19
+
20
+ class LazyCorpusLoader:
21
+ """
22
+ To see the API documentation for this lazily loaded corpus, first
23
+ run corpus.ensure_loaded(), and then run help(this_corpus).
24
+
25
+ LazyCorpusLoader is a proxy object which is used to stand in for a
26
+ corpus object before the corpus is loaded. This allows NLTK to
27
+ create an object for each corpus, but defer the costs associated
28
+ with loading those corpora until the first time that they're
29
+ actually accessed.
30
+
31
+ The first time this object is accessed in any way, it will load
32
+ the corresponding corpus, and transform itself into that corpus
33
+ (by modifying its own ``__class__`` and ``__dict__`` attributes).
34
+
35
+ If the corpus cannot be found, then accessing this object will
36
+ raise an exception, displaying installation instructions for the
37
+ NLTK data package. Once they've properly installed the data
38
+ package (or modified ``nltk.data.path`` to point to its location),
39
+ they can then use the corpus object without restarting python.
40
+
41
+ :param name: The name of the corpus
42
+ :type name: str
43
+ :param reader_cls: The specific CorpusReader class, e.g. PlaintextCorpusReader, WordListCorpusReader
44
+ :type reader: nltk.corpus.reader.api.CorpusReader
45
+ :param nltk_data_subdir: The subdirectory where the corpus is stored.
46
+ :type nltk_data_subdir: str
47
+ :param `*args`: Any other non-keyword arguments that `reader_cls` might need.
48
+ :param `**kwargs`: Any other keyword arguments that `reader_cls` might need.
49
+ """
50
+
51
+ def __init__(self, name, reader_cls, *args, **kwargs):
52
+ from nltk.corpus.reader.api import CorpusReader
53
+
54
+ assert issubclass(reader_cls, CorpusReader)
55
+ self.__name = self.__name__ = name
56
+ self.__reader_cls = reader_cls
57
+ # If nltk_data_subdir is set explicitly
58
+ if "nltk_data_subdir" in kwargs:
59
+ # Use the specified subdirectory path
60
+ self.subdir = kwargs["nltk_data_subdir"]
61
+ # Pop the `nltk_data_subdir` argument; we don't need it anymore.
62
+ kwargs.pop("nltk_data_subdir", None)
63
+ else: # Otherwise use 'nltk_data/corpora'
64
+ self.subdir = "corpora"
65
+ self.__args = args
66
+ self.__kwargs = kwargs
67
+
68
+ def __load(self):
69
+ # Find the corpus root directory.
70
+ zip_name = re.sub(r"(([^/]+)(/.*)?)", r"\2.zip/\1/", self.__name)
71
+ if TRY_ZIPFILE_FIRST:
72
+ try:
73
+ root = nltk.data.find(f"{self.subdir}/{zip_name}")
74
+ except LookupError as e:
75
+ try:
76
+ root = nltk.data.find(f"{self.subdir}/{self.__name}")
77
+ except LookupError:
78
+ raise e
79
+ else:
80
+ try:
81
+ root = nltk.data.find(f"{self.subdir}/{self.__name}")
82
+ except LookupError as e:
83
+ try:
84
+ root = nltk.data.find(f"{self.subdir}/{zip_name}")
85
+ except LookupError:
86
+ raise e
87
+
88
+ # Load the corpus.
89
+ corpus = self.__reader_cls(root, *self.__args, **self.__kwargs)
90
+
91
+ # This is where the magic happens! Transform ourselves into
92
+ # the corpus by modifying our own __dict__ and __class__ to
93
+ # match that of the corpus.
94
+
95
+ args, kwargs = self.__args, self.__kwargs
96
+ name, reader_cls = self.__name, self.__reader_cls
97
+
98
+ self.__dict__ = corpus.__dict__
99
+ self.__class__ = corpus.__class__
100
+
101
+ # _unload support: assign __dict__ and __class__ back, then do GC.
102
+ # after reassigning __dict__ there shouldn't be any references to
103
+ # corpus data so the memory should be deallocated after gc.collect()
104
+ def _unload(self):
105
+ lazy_reader = LazyCorpusLoader(name, reader_cls, *args, **kwargs)
106
+ self.__dict__ = lazy_reader.__dict__
107
+ self.__class__ = lazy_reader.__class__
108
+ gc.collect()
109
+
110
+ self._unload = _make_bound_method(_unload, self)
111
+
112
+ def __getattr__(self, attr):
113
+
114
+ # Fix for inspect.isclass under Python 2.6
115
+ # (see https://bugs.python.org/issue1225107).
116
+ # Without this fix tests may take extra 1.5GB RAM
117
+ # because all corpora get loaded during test collection.
118
+ if attr == "__bases__":
119
+ raise AttributeError("LazyCorpusLoader object has no attribute '__bases__'")
120
+
121
+ self.__load()
122
+ # This looks circular, but it's not, since __load() changes our
123
+ # __class__ to something new:
124
+ return getattr(self, attr)
125
+
126
+ def __repr__(self):
127
+ return "<{} in {!r} (not loaded yet)>".format(
128
+ self.__reader_cls.__name__,
129
+ ".../corpora/" + self.__name,
130
+ )
131
+
132
+ def _unload(self):
133
+ # If an exception occurs during corpus loading then
134
+ # '_unload' method may be unattached, so __getattr__ can be called;
135
+ # we shouldn't trigger corpus loading again in this case.
136
+ pass
137
+
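# A minimal sketch of how this loader is used (modeled on the definitions in
# nltk.corpus.__init__; the fileid regexp here is simplified):
#
#     >>> from nltk.corpus.util import LazyCorpusLoader
#     >>> from nltk.corpus.reader import WordListCorpusReader
#     >>> stopwords = LazyCorpusLoader("stopwords", WordListCorpusReader, r".*", encoding="utf8")
#     >>> stopwords                          # still an unloaded proxy at this point
#     <WordListCorpusReader in '.../corpora/stopwords' (not loaded yet)>
#     >>> stopwords.words("english")[:3]     # first access triggers __load()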
138
+
139
+ def _make_bound_method(func, self):
140
+ """
141
+ Magic for creating bound methods (used for _unload).
142
+ """
143
+
144
+ class Foo:
145
+ def meth(self):
146
+ pass
147
+
148
+ f = Foo()
149
+ bound_method = type(f.meth)
150
+
151
+ try:
152
+ return bound_method(func, self, self.__class__)
153
+ except TypeError: # python3
154
+ return bound_method(func, self)
lib/python3.10/site-packages/nltk/inference/resolution.py ADDED
@@ -0,0 +1,759 @@
1
+ # Natural Language Toolkit: First-order Resolution-based Theorem Prover
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Module for a resolution-based First Order theorem prover.
11
+ """
12
+
13
+ import operator
14
+ from collections import defaultdict
15
+ from functools import reduce
16
+
17
+ from nltk.inference.api import BaseProverCommand, Prover
18
+ from nltk.sem import skolemize
19
+ from nltk.sem.logic import (
20
+ AndExpression,
21
+ ApplicationExpression,
22
+ EqualityExpression,
23
+ Expression,
24
+ IndividualVariableExpression,
25
+ NegatedExpression,
26
+ OrExpression,
27
+ Variable,
28
+ VariableExpression,
29
+ is_indvar,
30
+ unique_variable,
31
+ )
32
+
33
+
34
+ class ProverParseError(Exception):
35
+ pass
36
+
37
+
38
+ class ResolutionProver(Prover):
39
+ ANSWER_KEY = "ANSWER"
40
+ _assume_false = True
41
+
42
+ def _prove(self, goal=None, assumptions=None, verbose=False):
43
+ """
44
+ :param goal: Input expression to prove
45
+ :type goal: sem.Expression
46
+ :param assumptions: Input expressions to use as assumptions in the proof
47
+ :type assumptions: list(sem.Expression)
48
+ """
49
+ if not assumptions:
50
+ assumptions = []
51
+
52
+ result = None
53
+ try:
54
+ clauses = []
55
+ if goal:
56
+ clauses.extend(clausify(-goal))
57
+ for a in assumptions:
58
+ clauses.extend(clausify(a))
59
+ result, clauses = self._attempt_proof(clauses)
60
+ if verbose:
61
+ print(ResolutionProverCommand._decorate_clauses(clauses))
62
+ except RuntimeError as e:
63
+ if self._assume_false and str(e).startswith(
64
+ "maximum recursion depth exceeded"
65
+ ):
66
+ result = False
67
+ clauses = []
68
+ else:
69
+ if verbose:
70
+ print(e)
71
+ else:
72
+ raise e
73
+ return (result, clauses)
74
+
75
+ def _attempt_proof(self, clauses):
76
+ # map indices to lists of indices, to store attempted unifications
77
+ tried = defaultdict(list)
78
+
79
+ i = 0
80
+ while i < len(clauses):
81
+ if not clauses[i].is_tautology():
82
+ # since we try clauses in order, we should start after the last
83
+ # index tried
84
+ if tried[i]:
85
+ j = tried[i][-1] + 1
86
+ else:
87
+ j = i + 1 # nothing tried yet for 'i', so start with the next
88
+
89
+ while j < len(clauses):
90
+ # don't: 1) unify a clause with itself,
91
+ # 2) use tautologies
92
+ if i != j and j and not clauses[j].is_tautology():
93
+ tried[i].append(j)
94
+ newclauses = clauses[i].unify(clauses[j])
95
+ if newclauses:
96
+ for newclause in newclauses:
97
+ newclause._parents = (i + 1, j + 1)
98
+ clauses.append(newclause)
99
+ if not len(newclause): # if there's an empty clause
100
+ return (True, clauses)
101
+ i = -1 # since we added a new clause, restart from the top
102
+ break
103
+ j += 1
104
+ i += 1
105
+ return (False, clauses)
106
+
107
+
108
+ class ResolutionProverCommand(BaseProverCommand):
109
+ def __init__(self, goal=None, assumptions=None, prover=None):
110
+ """
111
+ :param goal: Input expression to prove
112
+ :type goal: sem.Expression
113
+ :param assumptions: Input expressions to use as assumptions in
114
+ the proof.
115
+ :type assumptions: list(sem.Expression)
116
+ """
117
+ if prover is not None:
118
+ assert isinstance(prover, ResolutionProver)
119
+ else:
120
+ prover = ResolutionProver()
121
+
122
+ BaseProverCommand.__init__(self, prover, goal, assumptions)
123
+ self._clauses = None
124
+
125
+ def prove(self, verbose=False):
126
+ """
127
+ Perform the actual proof. Store the result to prevent unnecessary
128
+ re-proving.
129
+ """
130
+ if self._result is None:
131
+ self._result, clauses = self._prover._prove(
132
+ self.goal(), self.assumptions(), verbose
133
+ )
134
+ self._clauses = clauses
135
+ self._proof = ResolutionProverCommand._decorate_clauses(clauses)
136
+ return self._result
137
+
138
+ def find_answers(self, verbose=False):
139
+ self.prove(verbose)
140
+
141
+ answers = set()
142
+ answer_ex = VariableExpression(Variable(ResolutionProver.ANSWER_KEY))
143
+ for clause in self._clauses:
144
+ for term in clause:
145
+ if (
146
+ isinstance(term, ApplicationExpression)
147
+ and term.function == answer_ex
148
+ and not isinstance(term.argument, IndividualVariableExpression)
149
+ ):
150
+ answers.add(term.argument)
151
+ return answers
152
+
153
+ @staticmethod
154
+ def _decorate_clauses(clauses):
155
+ """
156
+ Decorate the proof output.
157
+ """
158
+ out = ""
159
+ max_clause_len = max(len(str(clause)) for clause in clauses)
160
+ max_seq_len = len(str(len(clauses)))
161
+ for i in range(len(clauses)):
162
+ parents = "A"
163
+ taut = ""
164
+ if clauses[i].is_tautology():
165
+ taut = "Tautology"
166
+ if clauses[i]._parents:
167
+ parents = str(clauses[i]._parents)
168
+ parents = " " * (max_clause_len - len(str(clauses[i])) + 1) + parents
169
+ seq = " " * (max_seq_len - len(str(i + 1))) + str(i + 1)
170
+ out += f"[{seq}] {clauses[i]} {parents} {taut}\n"
171
+ return out
172
+
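# A hedged usage sketch for the command class above. ANSWER is the special
# answer-extraction predicate named by ResolutionProver.ANSWER_KEY; the
# facts here are arbitrary examples:
#
#     >>> from nltk.sem import Expression
#     >>> read_expr = Expression.fromstring
#     >>> p1 = read_expr('father_of(art, john)')
#     >>> p2 = read_expr('all x.(father_of(x, john) -> parent_of(x, john))')
#     >>> g = read_expr('all x.(parent_of(x, john) -> ANSWER(x))')
#     >>> cmd = ResolutionProverCommand(None, [p1, p2, g])
#     >>> cmd.find_answers()     # expected to be a set containing the constant 'art'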
173
+
174
+ class Clause(list):
175
+ def __init__(self, data):
176
+ list.__init__(self, data)
177
+ self._is_tautology = None
178
+ self._parents = None
179
+
180
+ def unify(self, other, bindings=None, used=None, skipped=None, debug=False):
181
+ """
182
+ Attempt to unify this Clause with the other, returning a list of
183
+ resulting, unified, Clauses.
184
+
185
+ :param other: ``Clause`` with which to unify
186
+ :param bindings: ``BindingDict`` containing bindings that should be used
187
+ during the unification
188
+ :param used: tuple of two lists of atoms. The first lists the
189
+ atoms from 'self' that were successfully unified with atoms from
190
+ 'other'. The second lists the atoms from 'other' that were successfully
191
+ unified with atoms from 'self'.
192
+ :param skipped: tuple of two ``Clause`` objects. The first is a list of all
193
+ the atoms from the 'self' Clause that have not been unified with
194
+ anything on the path. The second is same thing for the 'other' Clause.
195
+ :param debug: bool indicating whether debug statements should print
196
+ :return: list containing all the resulting ``Clause`` objects that could be
197
+ obtained by unification
198
+ """
199
+ if bindings is None:
200
+ bindings = BindingDict()
201
+ if used is None:
202
+ used = ([], [])
203
+ if skipped is None:
204
+ skipped = ([], [])
205
+ if isinstance(debug, bool):
206
+ debug = DebugObject(debug)
207
+
208
+ newclauses = _iterate_first(
209
+ self, other, bindings, used, skipped, _complete_unify_path, debug
210
+ )
211
+
212
+ # remove subsumed clauses. make a list of all indices of subsumed
213
+ # clauses, and then remove them from the list
214
+ subsumed = []
215
+ for i, c1 in enumerate(newclauses):
216
+ if i not in subsumed:
217
+ for j, c2 in enumerate(newclauses):
218
+ if i != j and j not in subsumed and c1.subsumes(c2):
219
+ subsumed.append(j)
220
+ result = []
221
+ for i in range(len(newclauses)):
222
+ if i not in subsumed:
223
+ result.append(newclauses[i])
224
+
225
+ return result
226
+
227
+ def isSubsetOf(self, other):
228
+ """
229
+ Return True iff every term in 'self' is a term in 'other'.
230
+
231
+ :param other: ``Clause``
232
+ :return: bool
233
+ """
234
+ for a in self:
235
+ if a not in other:
236
+ return False
237
+ return True
238
+
239
+ def subsumes(self, other):
240
+ """
241
+ Return True iff 'self' subsumes 'other', that is, if there is a
242
+ substitution such that every term in 'self' can be unified with a term
243
+ in 'other'.
244
+
245
+ :param other: ``Clause``
246
+ :return: bool
247
+ """
248
+ negatedother = []
249
+ for atom in other:
250
+ if isinstance(atom, NegatedExpression):
251
+ negatedother.append(atom.term)
252
+ else:
253
+ negatedother.append(-atom)
254
+
255
+ negatedotherClause = Clause(negatedother)
256
+
257
+ bindings = BindingDict()
258
+ used = ([], [])
259
+ skipped = ([], [])
260
+ debug = DebugObject(False)
261
+
262
+ return (
263
+ len(
264
+ _iterate_first(
265
+ self,
266
+ negatedotherClause,
267
+ bindings,
268
+ used,
269
+ skipped,
270
+ _subsumes_finalize,
271
+ debug,
272
+ )
273
+ )
274
+ > 0
275
+ )
276
+
277
+ def __getslice__(self, start, end):
278
+ return Clause(list.__getslice__(self, start, end))
279
+
280
+ def __sub__(self, other):
281
+ return Clause([a for a in self if a not in other])
282
+
283
+ def __add__(self, other):
284
+ return Clause(list.__add__(self, other))
285
+
286
+ def is_tautology(self):
287
+ """
288
+ Self is a tautology if it contains ground terms P and -P. The ground
289
+ term, P, must be an exact match, i.e., not using unification.
290
+ """
291
+ if self._is_tautology is not None:
292
+ return self._is_tautology
293
+ for i, a in enumerate(self):
294
+ if not isinstance(a, EqualityExpression):
295
+ j = len(self) - 1
296
+ while j > i:
297
+ b = self[j]
298
+ if isinstance(a, NegatedExpression):
299
+ if a.term == b:
300
+ self._is_tautology = True
301
+ return True
302
+ elif isinstance(b, NegatedExpression):
303
+ if a == b.term:
304
+ self._is_tautology = True
305
+ return True
306
+ j -= 1
307
+ self._is_tautology = False
308
+ return False
309
+
310
+ def free(self):
311
+ return reduce(operator.or_, ((atom.free() | atom.constants()) for atom in self))
312
+
313
+ def replace(self, variable, expression):
314
+ """
315
+ Replace every instance of variable with expression across every atom
316
+ in the clause
317
+
318
+ :param variable: ``Variable``
319
+ :param expression: ``Expression``
320
+ """
321
+ return Clause([atom.replace(variable, expression) for atom in self])
322
+
323
+ def substitute_bindings(self, bindings):
324
+ """
325
+ Replace every binding
326
+
327
+ :param bindings: A list of tuples mapping Variable Expressions to the
328
+ Expressions to which they are bound.
329
+ :return: ``Clause``
330
+ """
331
+ return Clause([atom.substitute_bindings(bindings) for atom in self])
332
+
333
+ def __str__(self):
334
+ return "{" + ", ".join("%s" % item for item in self) + "}"
335
+
336
+ def __repr__(self):
337
+ return "%s" % self
338
+
339
+
340
+ def _iterate_first(first, second, bindings, used, skipped, finalize_method, debug):
341
+ """
342
+ This method facilitates movement through the terms of 'self'
343
+ """
344
+ debug.line(f"unify({first},{second}) {bindings}")
345
+
346
+ if not len(first) or not len(second): # if no more recursions can be performed
347
+ return finalize_method(first, second, bindings, used, skipped, debug)
348
+ else:
349
+ # explore this 'self' atom
350
+ result = _iterate_second(
351
+ first, second, bindings, used, skipped, finalize_method, debug + 1
352
+ )
353
+
354
+ # skip this possible 'self' atom
355
+ newskipped = (skipped[0] + [first[0]], skipped[1])
356
+ result += _iterate_first(
357
+ first[1:], second, bindings, used, newskipped, finalize_method, debug + 1
358
+ )
359
+
360
+ try:
361
+ newbindings, newused, unused = _unify_terms(
362
+ first[0], second[0], bindings, used
363
+ )
364
+ # Unification found, so progress with this line of unification
365
+ # put skipped and unused terms back into play for later unification.
366
+ newfirst = first[1:] + skipped[0] + unused[0]
367
+ newsecond = second[1:] + skipped[1] + unused[1]
368
+ result += _iterate_first(
369
+ newfirst,
370
+ newsecond,
371
+ newbindings,
372
+ newused,
373
+ ([], []),
374
+ finalize_method,
375
+ debug + 1,
376
+ )
377
+ except BindingException:
378
+ # the atoms could not be unified,
379
+ pass
380
+
381
+ return result
382
+
383
+
384
+ def _iterate_second(first, second, bindings, used, skipped, finalize_method, debug):
385
+ """
386
+ This method facilitates movement through the terms of 'other'
387
+ """
388
+ debug.line(f"unify({first},{second}) {bindings}")
389
+
390
+ if not len(first) or not len(second): # if no more recursions can be performed
391
+ return finalize_method(first, second, bindings, used, skipped, debug)
392
+ else:
393
+ # skip this possible pairing and move to the next
394
+ newskipped = (skipped[0], skipped[1] + [second[0]])
395
+ result = _iterate_second(
396
+ first, second[1:], bindings, used, newskipped, finalize_method, debug + 1
397
+ )
398
+
399
+ try:
400
+ newbindings, newused, unused = _unify_terms(
401
+ first[0], second[0], bindings, used
402
+ )
403
+ # Unification found, so progress with this line of unification
404
+ # put skipped and unused terms back into play for later unification.
405
+ newfirst = first[1:] + skipped[0] + unused[0]
406
+ newsecond = second[1:] + skipped[1] + unused[1]
407
+ result += _iterate_second(
408
+ newfirst,
409
+ newsecond,
410
+ newbindings,
411
+ newused,
412
+ ([], []),
413
+ finalize_method,
414
+ debug + 1,
415
+ )
416
+ except BindingException:
417
+ # the atoms could not be unified,
418
+ pass
419
+
420
+ return result
421
+
422
+
423
+ def _unify_terms(a, b, bindings=None, used=None):
424
+ """
425
+ This method attempts to unify two terms. Two expressions are unifiable
426
+ if there exists a substitution function S such that S(a) == S(-b).
427
+
428
+ :param a: ``Expression``
429
+ :param b: ``Expression``
430
+ :param bindings: ``BindingDict`` a starting set of bindings with which
431
+ the unification must be consistent
432
+ :return: ``BindingDict`` A dictionary of the bindings required to unify
433
+ :raise ``BindingException``: If the terms cannot be unified
434
+ """
435
+ assert isinstance(a, Expression)
436
+ assert isinstance(b, Expression)
437
+
438
+ if bindings is None:
439
+ bindings = BindingDict()
440
+ if used is None:
441
+ used = ([], [])
442
+
443
+ # Use resolution
444
+ if isinstance(a, NegatedExpression) and isinstance(b, ApplicationExpression):
445
+ newbindings = most_general_unification(a.term, b, bindings)
446
+ newused = (used[0] + [a], used[1] + [b])
447
+ unused = ([], [])
448
+ elif isinstance(a, ApplicationExpression) and isinstance(b, NegatedExpression):
449
+ newbindings = most_general_unification(a, b.term, bindings)
450
+ newused = (used[0] + [a], used[1] + [b])
451
+ unused = ([], [])
452
+
453
+ # Use demodulation
454
+ elif isinstance(a, EqualityExpression):
455
+ newbindings = BindingDict([(a.first.variable, a.second)])
456
+ newused = (used[0] + [a], used[1])
457
+ unused = ([], [b])
458
+ elif isinstance(b, EqualityExpression):
459
+ newbindings = BindingDict([(b.first.variable, b.second)])
460
+ newused = (used[0], used[1] + [b])
461
+ unused = ([a], [])
462
+
463
+ else:
464
+ raise BindingException((a, b))
465
+
466
+ return newbindings, newused, unused
467
+
468
+
469
+ def _complete_unify_path(first, second, bindings, used, skipped, debug):
470
+ if used[0] or used[1]: # if bindings were made along the path
471
+ newclause = Clause(skipped[0] + skipped[1] + first + second)
472
+ debug.line(" -> New Clause: %s" % newclause)
473
+ return [newclause.substitute_bindings(bindings)]
474
+ else: # no bindings made means no unification occurred. so no result
475
+ debug.line(" -> End")
476
+ return []
477
+
478
+
479
+ def _subsumes_finalize(first, second, bindings, used, skipped, debug):
480
+ if not len(skipped[0]) and not len(first):
481
+ # If there are no skipped terms and no terms left in 'first', then
482
+ # all of the terms in the original 'self' were unified with terms
483
+ # in 'other'. Therefore, there exists a binding (this one) such that
484
+ # every term in self can be unified with a term in other, which
485
+ # is the definition of subsumption.
486
+ return [True]
487
+ else:
488
+ return []
489
+
490
+
491
+ def clausify(expression):
492
+ """
493
+ Skolemize, clausify, and standardize the variables apart.
494
+ """
495
+ clause_list = []
496
+ for clause in _clausify(skolemize(expression)):
497
+ for free in clause.free():
498
+ if is_indvar(free.name):
499
+ newvar = VariableExpression(unique_variable())
500
+ clause = clause.replace(free, newvar)
501
+ clause_list.append(clause)
502
+ return clause_list
503
+
504
+
505
+ def _clausify(expression):
506
+ """
507
+ :param expression: a skolemized expression in CNF
508
+ """
509
+ if isinstance(expression, AndExpression):
510
+ return _clausify(expression.first) + _clausify(expression.second)
511
+ elif isinstance(expression, OrExpression):
512
+ first = _clausify(expression.first)
513
+ second = _clausify(expression.second)
514
+ assert len(first) == 1
515
+ assert len(second) == 1
516
+ return [first[0] + second[0]]
517
+ elif isinstance(expression, EqualityExpression):
518
+ return [Clause([expression])]
519
+ elif isinstance(expression, ApplicationExpression):
520
+ return [Clause([expression])]
521
+ elif isinstance(expression, NegatedExpression):
522
+ if isinstance(expression.term, ApplicationExpression):
523
+ return [Clause([expression])]
524
+ elif isinstance(expression.term, EqualityExpression):
525
+ return [Clause([expression])]
526
+ raise ProverParseError()
527
+
528
+
529
+ class BindingDict:
530
+ def __init__(self, binding_list=None):
531
+ """
532
+ :param binding_list: list of (``AbstractVariableExpression``, ``AtomicExpression``) to initialize the dictionary
533
+ """
534
+ self.d = {}
535
+
536
+ if binding_list:
537
+ for (v, b) in binding_list:
538
+ self[v] = b
539
+
540
+ def __setitem__(self, variable, binding):
541
+ """
542
+ A binding is consistent with the dict if its variable is not already bound, OR if its
543
+ variable is already bound to its argument.
544
+
545
+ :param variable: ``Variable`` The variable to bind
546
+ :param binding: ``Expression`` The atomic to which 'variable' should be bound
547
+ :raise BindingException: If the variable cannot be bound in this dictionary
548
+ """
549
+ assert isinstance(variable, Variable)
550
+ assert isinstance(binding, Expression)
551
+
552
+ try:
553
+ existing = self[variable]
554
+ except KeyError:
555
+ existing = None
556
+
557
+ if not existing or binding == existing:
558
+ self.d[variable] = binding
559
+ elif isinstance(binding, IndividualVariableExpression):
560
+ # Since variable is already bound, try to bind binding to variable
561
+ try:
562
+ existing = self[binding.variable]
563
+ except KeyError:
564
+ existing = None
565
+
566
+ binding2 = VariableExpression(variable)
567
+
568
+ if not existing or binding2 == existing:
569
+ self.d[binding.variable] = binding2
570
+ else:
571
+ raise BindingException(
572
+ "Variable %s already bound to another " "value" % (variable)
573
+ )
574
+ else:
575
+ raise BindingException(
576
+ "Variable %s already bound to another " "value" % (variable)
577
+ )
578
+
579
+ def __getitem__(self, variable):
580
+ """
581
+ Return the expression to which 'variable' is bound
582
+ """
583
+ assert isinstance(variable, Variable)
584
+
585
+ intermediate = self.d[variable]
586
+ while intermediate:
587
+ try:
588
+ intermediate = self.d[intermediate]
589
+ except KeyError:
590
+ return intermediate
591
+
592
+ def __contains__(self, item):
593
+ return item in self.d
594
+
595
+ def __add__(self, other):
596
+ """
597
+ :param other: ``BindingDict`` The dict with which to combine self
598
+ :return: ``BindingDict`` A new dict containing all the elements of both parameters
599
+ :raise BindingException: If the parameter dictionaries are not consistent with each other
600
+ """
601
+ try:
602
+ combined = BindingDict()
603
+ for v in self.d:
604
+ combined[v] = self.d[v]
605
+ for v in other.d:
606
+ combined[v] = other.d[v]
607
+ return combined
608
+ except BindingException as e:
609
+ raise BindingException(
610
+ "Attempting to add two contradicting "
611
+ "BindingDicts: '%s' and '%s'" % (self, other)
612
+ ) from e
613
+
614
+ def __len__(self):
615
+ return len(self.d)
616
+
617
+ def __str__(self):
618
+ data_str = ", ".join(f"{v}: {self.d[v]}" for v in sorted(self.d.keys()))
619
+ return "{" + data_str + "}"
620
+
621
+ def __repr__(self):
622
+ return "%s" % self
623
+
624
+
625
+ def most_general_unification(a, b, bindings=None):
626
+ """
627
+ Find the most general unification of the two given expressions
628
+
629
+ :param a: ``Expression``
630
+ :param b: ``Expression``
631
+ :param bindings: ``BindingDict`` a starting set of bindings with which the
632
+ unification must be consistent
633
+ :return: a list of bindings
634
+ :raise BindingException: if the Expressions cannot be unified
635
+ """
636
+ if bindings is None:
637
+ bindings = BindingDict()
638
+
639
+ if a == b:
640
+ return bindings
641
+ elif isinstance(a, IndividualVariableExpression):
642
+ return _mgu_var(a, b, bindings)
643
+ elif isinstance(b, IndividualVariableExpression):
644
+ return _mgu_var(b, a, bindings)
645
+ elif isinstance(a, ApplicationExpression) and isinstance(b, ApplicationExpression):
646
+ return most_general_unification(
647
+ a.function, b.function, bindings
648
+ ) + most_general_unification(a.argument, b.argument, bindings)
649
+ raise BindingException((a, b))
650
+
651
+
652
+ def _mgu_var(var, expression, bindings):
653
+ if var.variable in expression.free() | expression.constants():
654
+ raise BindingException((var, expression))
655
+ else:
656
+ return BindingDict([(var.variable, expression)]) + bindings
657
+
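# A small illustration of most_general_unification (a sketch using the
# nltk.sem.logic parser; 'x' is an individual variable, 'john' a constant):
#
#     >>> from nltk.sem.logic import Expression
#     >>> print(most_general_unification(Expression.fromstring('walks(x)'),
#     ...                                Expression.fromstring('walks(john)')))
#     {x: john}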
658
+
659
+ class BindingException(Exception):
660
+ def __init__(self, arg):
661
+ if isinstance(arg, tuple):
662
+ Exception.__init__(self, "'%s' cannot be bound to '%s'" % arg)
663
+ else:
664
+ Exception.__init__(self, arg)
665
+
666
+
667
+ class UnificationException(Exception):
668
+ def __init__(self, a, b):
669
+ Exception.__init__(self, f"'{a}' cannot unify with '{b}'")
670
+
671
+
672
+ class DebugObject:
673
+ def __init__(self, enabled=True, indent=0):
674
+ self.enabled = enabled
675
+ self.indent = indent
676
+
677
+ def __add__(self, i):
678
+ return DebugObject(self.enabled, self.indent + i)
679
+
680
+ def line(self, line):
681
+ if self.enabled:
682
+ print(" " * self.indent + line)
683
+
684
+
685
+ def testResolutionProver():
686
+ resolution_test(r"man(x)")
687
+ resolution_test(r"(man(x) -> man(x))")
688
+ resolution_test(r"(man(x) -> --man(x))")
689
+ resolution_test(r"-(man(x) and -man(x))")
690
+ resolution_test(r"(man(x) or -man(x))")
691
+ resolution_test(r"(man(x) -> man(x))")
692
+ resolution_test(r"-(man(x) and -man(x))")
693
+ resolution_test(r"(man(x) or -man(x))")
694
+ resolution_test(r"(man(x) -> man(x))")
695
+ resolution_test(r"(man(x) iff man(x))")
696
+ resolution_test(r"-(man(x) iff -man(x))")
697
+ resolution_test("all x.man(x)")
698
+ resolution_test("-all x.some y.F(x,y) & some x.all y.(-F(x,y))")
699
+ resolution_test("some x.all y.sees(x,y)")
700
+
701
+ p1 = Expression.fromstring(r"all x.(man(x) -> mortal(x))")
702
+ p2 = Expression.fromstring(r"man(Socrates)")
703
+ c = Expression.fromstring(r"mortal(Socrates)")
704
+ print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}")
705
+
706
+ p1 = Expression.fromstring(r"all x.(man(x) -> walks(x))")
707
+ p2 = Expression.fromstring(r"man(John)")
708
+ c = Expression.fromstring(r"some y.walks(y)")
709
+ print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}")
710
+
711
+ p = Expression.fromstring(r"some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))")
712
+ c = Expression.fromstring(r"some e0.walk(e0,mary)")
713
+ print(f"{p} |- {c}: {ResolutionProver().prove(c, [p])}")
714
+
715
+
716
+ def resolution_test(e):
717
+ f = Expression.fromstring(e)
718
+ t = ResolutionProver().prove(f)
719
+ print(f"|- {f}: {t}")
720
+
721
+
722
+ def test_clausify():
723
+ lexpr = Expression.fromstring
724
+
725
+ print(clausify(lexpr("P(x) | Q(x)")))
726
+ print(clausify(lexpr("(P(x) & Q(x)) | R(x)")))
727
+ print(clausify(lexpr("P(x) | (Q(x) & R(x))")))
728
+ print(clausify(lexpr("(P(x) & Q(x)) | (R(x) & S(x))")))
729
+
730
+ print(clausify(lexpr("P(x) | Q(x) | R(x)")))
731
+ print(clausify(lexpr("P(x) | (Q(x) & R(x)) | S(x)")))
732
+
733
+ print(clausify(lexpr("exists x.P(x) | Q(x)")))
734
+
735
+ print(clausify(lexpr("-(-P(x) & Q(x))")))
736
+ print(clausify(lexpr("P(x) <-> Q(x)")))
737
+ print(clausify(lexpr("-(P(x) <-> Q(x))")))
738
+ print(clausify(lexpr("-(all x.P(x))")))
739
+ print(clausify(lexpr("-(some x.P(x))")))
740
+
741
+ print(clausify(lexpr("some x.P(x)")))
742
+ print(clausify(lexpr("some x.all y.P(x,y)")))
743
+ print(clausify(lexpr("all y.some x.P(x,y)")))
744
+ print(clausify(lexpr("all z.all y.some x.P(x,y,z)")))
745
+ print(clausify(lexpr("all x.(all y.P(x,y) -> -all y.(Q(x,y) -> R(x,y)))")))
746
+
747
+
748
+ def demo():
749
+ test_clausify()
750
+ print()
751
+ testResolutionProver()
752
+ print()
753
+
754
+ p = Expression.fromstring("man(x)")
755
+ print(ResolutionProverCommand(p, [p]).prove())
756
+
757
+
758
+ if __name__ == "__main__":
759
+ demo()