Add files using upload-large-folder tool
- .gitattributes +1 -0
- lib/python3.10/site-packages/google/_upb/_message.abi3.so +3 -0
- lib/python3.10/site-packages/nltk/test/__init__.py +18 -0
- lib/python3.10/site-packages/nltk/test/all.py +25 -0
- lib/python3.10/site-packages/nltk/test/bnc.doctest +60 -0
- lib/python3.10/site-packages/nltk/test/ccg_semantics.doctest +552 -0
- lib/python3.10/site-packages/nltk/test/childes.doctest +190 -0
- lib/python3.10/site-packages/nltk/test/chunk.doctest +372 -0
- lib/python3.10/site-packages/nltk/test/classify.doctest +202 -0
- lib/python3.10/site-packages/nltk/test/collections.doctest +31 -0
- lib/python3.10/site-packages/nltk/test/collocations.doctest +307 -0
- lib/python3.10/site-packages/nltk/test/conftest.py +33 -0
- lib/python3.10/site-packages/nltk/test/crubadan.doctest +65 -0
- lib/python3.10/site-packages/nltk/test/drt.doctest +515 -0
- lib/python3.10/site-packages/nltk/test/gensim_fixt.py +4 -0
- lib/python3.10/site-packages/nltk/test/gluesemantics.doctest +383 -0
- lib/python3.10/site-packages/nltk/test/gluesemantics_malt.doctest +69 -0
- lib/python3.10/site-packages/nltk/test/index.doctest +100 -0
- lib/python3.10/site-packages/nltk/test/inference.doctest +536 -0
- lib/python3.10/site-packages/nltk/test/internals.doctest +161 -0
- lib/python3.10/site-packages/nltk/test/logic.doctest +1096 -0
- lib/python3.10/site-packages/nltk/test/metrics.doctest +321 -0
- lib/python3.10/site-packages/nltk/test/misc.doctest +118 -0
- lib/python3.10/site-packages/nltk/test/probability.doctest +306 -0
- lib/python3.10/site-packages/nltk/test/propbank.doctest +176 -0
- lib/python3.10/site-packages/nltk/test/relextract.doctest +263 -0
- lib/python3.10/site-packages/nltk/test/resolution.doctest +222 -0
- lib/python3.10/site-packages/nltk/test/semantics.doctest +667 -0
- lib/python3.10/site-packages/nltk/test/sentiment.doctest +236 -0
- lib/python3.10/site-packages/nltk/test/tag.doctest +475 -0
- lib/python3.10/site-packages/nltk/test/treetransforms.doctest +154 -0
- lib/python3.10/site-packages/nltk/test/unit/__init__.py +0 -0
- lib/python3.10/site-packages/nltk/test/unit/lm/__init__.py +0 -0
- lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py +116 -0
- lib/python3.10/site-packages/nltk/test/unit/lm/test_preprocessing.py +30 -0
- lib/python3.10/site-packages/nltk/test/unit/lm/test_vocabulary.py +156 -0
- lib/python3.10/site-packages/nltk/test/unit/test_aline.py +48 -0
- lib/python3.10/site-packages/nltk/test/unit/test_bllip.py +42 -0
- lib/python3.10/site-packages/nltk/test/unit/test_brill.py +34 -0
- lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py +39 -0
- lib/python3.10/site-packages/nltk/test/unit/test_chunk.py +85 -0
- lib/python3.10/site-packages/nltk/test/unit/test_classify.py +49 -0
- lib/python3.10/site-packages/nltk/test/unit/test_collocations.py +120 -0
- lib/python3.10/site-packages/nltk/test/unit/test_corpora.py +274 -0
- lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py +48 -0
- lib/python3.10/site-packages/nltk/test/unit/test_data.py +15 -0
- lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py +144 -0
- lib/python3.10/site-packages/nltk/test/unit/test_distance.py +129 -0
- lib/python3.10/site-packages/nltk/test/unit/test_downloader.py +19 -0
- lib/python3.10/site-packages/nltk/test/unit/test_freqdist.py +7 -0
.gitattributes
CHANGED
@@ -61,3 +61,4 @@ lib/python3.10/site-packages/_cffi_backend.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 lib/python3.10/site-packages/kenlm.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 lib/python3.10/site-packages/libkenlm.so filter=lfs diff=lfs merge=lfs -text
 lib/python3.10/site-packages/tokenizers/tokenizers.abi3.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/google/_upb/_message.abi3.so filter=lfs diff=lfs merge=lfs -text
lib/python3.10/site-packages/google/_upb/_message.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:472be67e0d62d25c594600bebcfa017cc19a0ff482be1a7a2204341b7424de15
+size 371296
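The three added lines are a standard Git LFS pointer: the 371296-byte shared object itself lives in LFS storage, and the repository tracks only its hash and size. A minimal sketch of reading those fields back out of a pointer file (parse_lfs_pointer is a hypothetical helper, not a git or NLTK API):

    def parse_lfs_pointer(text: str) -> dict:
        # Each pointer line is "<key> <value>", e.g. "size 371296".
        fields = {}
        for line in text.strip().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    # parse_lfs_pointer(pointer_text)["oid"] -> "sha256:472be67e0d62d25c..."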
lib/python3.10/site-packages/nltk/test/__init__.py
ADDED
@@ -0,0 +1,18 @@
+# Natural Language Toolkit: Unit Tests
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Edward Loper <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+Unit tests for the NLTK modules. These tests are intended to ensure
+that source code changes don't accidentally introduce bugs.
+For instructions, please see:
+
+../../web/dev/local_testing.rst
+
+https://github.com/nltk/nltk/blob/develop/web/dev/local_testing.rst
+
+
+"""
lib/python3.10/site-packages/nltk/test/all.py
ADDED
@@ -0,0 +1,25 @@
+"""Test suite that runs all NLTK tests.
+
+This module, `nltk.test.all`, is named as the NLTK ``test_suite`` in the
+project's ``setup-eggs.py`` file. Here, we create a test suite that
+runs all of our doctests, and return it for processing by the setuptools
+test harness.
+
+"""
+import doctest
+import os.path
+import unittest
+from glob import glob
+
+
+def additional_tests():
+    # print("here-000000000000000")
+    # print("-----", glob(os.path.join(os.path.dirname(__file__), '*.doctest')))
+    dir = os.path.dirname(__file__)
+    paths = glob(os.path.join(dir, "*.doctest"))
+    files = [os.path.basename(path) for path in paths]
+    return unittest.TestSuite([doctest.DocFileSuite(file) for file in files])
+
+
+# if os.path.split(path)[-1] != 'index.rst'
+# skips time-dependent doctest in index.rst
lib/python3.10/site-packages/nltk/test/bnc.doctest
ADDED
@@ -0,0 +1,60 @@
+.. Copyright (C) 2001-2023 NLTK Project
+.. For license information, see LICENSE.TXT
+
+>>> import os.path
+
+>>> from nltk.corpus.reader import BNCCorpusReader
+>>> import nltk.test
+
+>>> root = os.path.dirname(nltk.test.__file__)
+>>> bnc = BNCCorpusReader(root=root, fileids='FX8.xml')
+
+Checking the word access.
+-------------------------
+
+>>> len(bnc.words())
+151
+
+>>> bnc.words()[:6]
+['Ah', 'there', 'we', 'are', ',', '.']
+>>> bnc.words(stem=True)[:6]
+['ah', 'there', 'we', 'be', ',', '.']
+
+>>> bnc.tagged_words()[:6]
+[('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')]
+
+>>> bnc.tagged_words(c5=True)[:6]
+[('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')]
+
+Testing access to the sentences.
+--------------------------------
+
+>>> len(bnc.sents())
+15
+
+>>> bnc.sents()[0]
+['Ah', 'there', 'we', 'are', ',', '.']
+>>> bnc.sents(stem=True)[0]
+['ah', 'there', 'we', 'be', ',', '.']
+
+>>> bnc.tagged_sents()[0]
+[('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')]
+>>> bnc.tagged_sents(c5=True)[0]
+[('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')]
+
+A not lazy loader.
+------------------
+
+>>> eager = BNCCorpusReader(root=root, fileids=r'FX8.xml', lazy=False)
+
+>>> len(eager.words())
+151
+>>> eager.words(stem=True)[6:17]
+['right', 'abdominal', 'wound', ',', 'she', 'be', 'a', 'wee', 'bit', 'confuse', '.']
+
+>>> eager.tagged_words()[6:11]
+[('Right', 'ADV'), ('abdominal', 'ADJ'), ('wound', 'SUBST'), (',', 'PUN'), ('she', 'PRON')]
+>>> eager.tagged_words(c5=True)[6:17]
+[('Right', 'AV0'), ('abdominal', 'AJ0'), ('wound', 'NN1'), (',', 'PUN'), ('she', 'PNP'), ("'s", 'VBZ'), ('a', 'AT0'), ('wee', 'AJ0-NN1'), ('bit', 'NN1'), ('confused', 'VVN-AJ0'), ('.', 'PUN')]
+>>> len(eager.sents())
+15
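Since bnc.doctest is an ordinary doctest text file, it can also be exercised on its own with the standard doctest module. A sketch, assuming the path below matches where this tree is unpacked; ELLIPSIS is enabled because the expected outputs rely on `...` truncation:

    import doctest

    # Run one NLTK .doctest file directly; module_relative=False allows
    # passing a filesystem path instead of a package-relative name.
    result = doctest.testfile(
        "lib/python3.10/site-packages/nltk/test/bnc.doctest",
        module_relative=False,
        optionflags=doctest.ELLIPSIS,
    )
    print(result)  # TestResults(failed=..., attempted=...)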
lib/python3.10/site-packages/nltk/test/ccg_semantics.doctest
ADDED
@@ -0,0 +1,552 @@
+.. Copyright (C) 2001-2023 NLTK Project
+.. For license information, see LICENSE.TXT
+
+==============================================
+Combinatory Categorial Grammar with semantics
+==============================================
+
+-----
+Chart
+-----
+
+
+>>> from nltk.ccg import chart, lexicon
+>>> from nltk.ccg.chart import printCCGDerivation
+
+No semantics
+-------------------
+
+>>> lex = lexicon.fromstring('''
+... :- S, NP, N
+... She => NP
+... has => (S\\NP)/NP
+... books => NP
+... ''',
+... False)
+
+>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
+>>> parses = list(parser.parse("She has books".split()))
+>>> print(str(len(parses)) + " parses")
+3 parses
+
+>>> printCCGDerivation(parses[0])
+She has books
+NP ((S\NP)/NP) NP
+-------------------->
+(S\NP)
+-------------------------<
+S
+
+>>> printCCGDerivation(parses[1])
+She has books
+NP ((S\NP)/NP) NP
+----->T
+(S/(S\NP))
+-------------------->
+(S\NP)
+------------------------->
+S
+
+
+>>> printCCGDerivation(parses[2])
+She has books
+NP ((S\NP)/NP) NP
+----->T
+(S/(S\NP))
+------------------>B
+(S/NP)
+------------------------->
+S
+
+Simple semantics
+-------------------
+
+>>> lex = lexicon.fromstring('''
+... :- S, NP, N
+... She => NP {she}
+... has => (S\\NP)/NP {\\x y.have(y, x)}
+... a => NP/N {\\P.exists z.P(z)}
+... book => N {book}
+... ''',
+... True)
+
+>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
+>>> parses = list(parser.parse("She has a book".split()))
+>>> print(str(len(parses)) + " parses")
+7 parses
+
+>>> printCCGDerivation(parses[0])
+She has a book
+NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
+------------------------------------->
+NP {exists z.book(z)}
+------------------------------------------------------------------->
+(S\NP) {\y.have(y,exists z.book(z))}
+-----------------------------------------------------------------------------<
+S {have(she,exists z.book(z))}
+
+>>> printCCGDerivation(parses[1])
+She has a book
+NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
+--------------------------------------------------------->B
+((S\NP)/N) {\P y.have(y,exists z.P(z))}
+------------------------------------------------------------------->
+(S\NP) {\y.have(y,exists z.book(z))}
+-----------------------------------------------------------------------------<
+S {have(she,exists z.book(z))}
+
+>>> printCCGDerivation(parses[2])
+She has a book
+NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
+---------->T
+(S/(S\NP)) {\F.F(she)}
+------------------------------------->
+NP {exists z.book(z)}
+------------------------------------------------------------------->
+(S\NP) {\y.have(y,exists z.book(z))}
+----------------------------------------------------------------------------->
+S {have(she,exists z.book(z))}
+
+>>> printCCGDerivation(parses[3])
+She has a book
+NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
+---------->T
+(S/(S\NP)) {\F.F(she)}
+--------------------------------------------------------->B
+((S\NP)/N) {\P y.have(y,exists z.P(z))}
+------------------------------------------------------------------->
+(S\NP) {\y.have(y,exists z.book(z))}
+----------------------------------------------------------------------------->
+S {have(she,exists z.book(z))}
+
+>>> printCCGDerivation(parses[4])
+She has a book
+NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
+---------->T
+(S/(S\NP)) {\F.F(she)}
+---------------------------------------->B
+(S/NP) {\x.have(she,x)}
+------------------------------------->
+NP {exists z.book(z)}
+----------------------------------------------------------------------------->
+S {have(she,exists z.book(z))}
+
+>>> printCCGDerivation(parses[5])
+She has a book
+NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
+---------->T
+(S/(S\NP)) {\F.F(she)}
+--------------------------------------------------------->B
+((S\NP)/N) {\P y.have(y,exists z.P(z))}
+------------------------------------------------------------------->B
+(S/N) {\P.have(she,exists z.P(z))}
+----------------------------------------------------------------------------->
+S {have(she,exists z.book(z))}
+
+>>> printCCGDerivation(parses[6])
+She has a book
+NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
+---------->T
+(S/(S\NP)) {\F.F(she)}
+---------------------------------------->B
+(S/NP) {\x.have(she,x)}
+------------------------------------------------------------------->B
+(S/N) {\P.have(she,exists z.P(z))}
+----------------------------------------------------------------------------->
+S {have(she,exists z.book(z))}
+
+Complex semantics
+-------------------
+
+>>> lex = lexicon.fromstring('''
+... :- S, NP, N
+... She => NP {she}
+... has => (S\\NP)/NP {\\x y.have(y, x)}
+... a => ((S\\NP)\\((S\\NP)/NP))/N {\\P R x.(exists z.P(z) & R(z,x))}
+... book => N {book}
+... ''',
+... True)
+
+>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
+>>> parses = list(parser.parse("She has a book".split()))
+>>> print(str(len(parses)) + " parses")
+2 parses
+
+>>> printCCGDerivation(parses[0])
+She has a book
+NP {she} ((S\NP)/NP) {\x y.have(y,x)} (((S\NP)\((S\NP)/NP))/N) {\P R x.(exists z.P(z) & R(z,x))} N {book}
+---------------------------------------------------------------------->
+((S\NP)\((S\NP)/NP)) {\R x.(exists z.book(z) & R(z,x))}
+----------------------------------------------------------------------------------------------------<
+(S\NP) {\x.(exists z.book(z) & have(x,z))}
+--------------------------------------------------------------------------------------------------------------<
+S {(exists z.book(z) & have(she,z))}
+
+>>> printCCGDerivation(parses[1])
+She has a book
+NP {she} ((S\NP)/NP) {\x y.have(y,x)} (((S\NP)\((S\NP)/NP))/N) {\P R x.(exists z.P(z) & R(z,x))} N {book}
+---------->T
+(S/(S\NP)) {\F.F(she)}
+---------------------------------------------------------------------->
+((S\NP)\((S\NP)/NP)) {\R x.(exists z.book(z) & R(z,x))}
+----------------------------------------------------------------------------------------------------<
+(S\NP) {\x.(exists z.book(z) & have(x,z))}
+-------------------------------------------------------------------------------------------------------------->
+S {(exists z.book(z) & have(she,z))}
+
+Using conjunctions
+---------------------
+
+# TODO: The semantics of "and" should have been more flexible
+>>> lex = lexicon.fromstring('''
+... :- S, NP, N
+... I => NP {I}
+... cook => (S\\NP)/NP {\\x y.cook(x,y)}
+... and => var\\.,var/.,var {\\P Q x y.(P(x,y) & Q(x,y))}
+... eat => (S\\NP)/NP {\\x y.eat(x,y)}
+... the => NP/N {\\x.the(x)}
+... bacon => N {bacon}
+... ''',
+... True)
+
+>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
+>>> parses = list(parser.parse("I cook and eat the bacon".split()))
+>>> print(str(len(parses)) + " parses")
+7 parses
+
+>>> printCCGDerivation(parses[0])
+I cook and eat the bacon
+NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
+------------------------------------------------------------------------------------->
+(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
+-------------------------------------------------------------------------------------------------------------------<
+((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
+------------------------------->
+NP {the(bacon)}
+-------------------------------------------------------------------------------------------------------------------------------------------------->
+(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
+----------------------------------------------------------------------------------------------------------------------------------------------------------<
+S {(eat(the(bacon),I) & cook(the(bacon),I))}
+
+>>> printCCGDerivation(parses[1])
+I cook and eat the bacon
+NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
+------------------------------------------------------------------------------------->
+(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
+-------------------------------------------------------------------------------------------------------------------<
+((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
+--------------------------------------------------------------------------------------------------------------------------------------->B
+((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
+-------------------------------------------------------------------------------------------------------------------------------------------------->
+(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
+----------------------------------------------------------------------------------------------------------------------------------------------------------<
+S {(eat(the(bacon),I) & cook(the(bacon),I))}
+
+>>> printCCGDerivation(parses[2])
+I cook and eat the bacon
+NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
+-------->T
+(S/(S\NP)) {\F.F(I)}
+------------------------------------------------------------------------------------->
+(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
+-------------------------------------------------------------------------------------------------------------------<
+((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
+------------------------------->
+NP {the(bacon)}
+-------------------------------------------------------------------------------------------------------------------------------------------------->
+(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
+---------------------------------------------------------------------------------------------------------------------------------------------------------->
+S {(eat(the(bacon),I) & cook(the(bacon),I))}
+
+>>> printCCGDerivation(parses[3])
+I cook and eat the bacon
+NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
+-------->T
+(S/(S\NP)) {\F.F(I)}
+------------------------------------------------------------------------------------->
+(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
+-------------------------------------------------------------------------------------------------------------------<
+((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
+--------------------------------------------------------------------------------------------------------------------------------------->B
+((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
+-------------------------------------------------------------------------------------------------------------------------------------------------->
+(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
+---------------------------------------------------------------------------------------------------------------------------------------------------------->
+S {(eat(the(bacon),I) & cook(the(bacon),I))}
+
+>>> printCCGDerivation(parses[4])
+I cook and eat the bacon
+NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
+-------->T
+(S/(S\NP)) {\F.F(I)}
+------------------------------------------------------------------------------------->
+(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
+-------------------------------------------------------------------------------------------------------------------<
+((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
+--------------------------------------------------------------------------------------------------------------------------->B
+(S/NP) {\x.(eat(x,I) & cook(x,I))}
+------------------------------->
+NP {the(bacon)}
+---------------------------------------------------------------------------------------------------------------------------------------------------------->
+S {(eat(the(bacon),I) & cook(the(bacon),I))}
+
+>>> printCCGDerivation(parses[5])
+I cook and eat the bacon
+NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
+-------->T
+(S/(S\NP)) {\F.F(I)}
+------------------------------------------------------------------------------------->
+(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
+-------------------------------------------------------------------------------------------------------------------<
+((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
+--------------------------------------------------------------------------------------------------------------------------------------->B
+((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
+----------------------------------------------------------------------------------------------------------------------------------------------->B
+(S/N) {\x.(eat(the(x),I) & cook(the(x),I))}
+---------------------------------------------------------------------------------------------------------------------------------------------------------->
+S {(eat(the(bacon),I) & cook(the(bacon),I))}
+
+>>> printCCGDerivation(parses[6])
+I cook and eat the bacon
+NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
+-------->T
+(S/(S\NP)) {\F.F(I)}
+------------------------------------------------------------------------------------->
+(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
+-------------------------------------------------------------------------------------------------------------------<
+((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
+--------------------------------------------------------------------------------------------------------------------------->B
+(S/NP) {\x.(eat(x,I) & cook(x,I))}
+----------------------------------------------------------------------------------------------------------------------------------------------->B
+(S/N) {\x.(eat(the(x),I) & cook(the(x),I))}
+---------------------------------------------------------------------------------------------------------------------------------------------------------->
+S {(eat(the(bacon),I) & cook(the(bacon),I))}
+
+Tests from published papers
+------------------------------
+
+An example from "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", Hockenmaier and Steedman, 2007, Page 359, https://www.aclweb.org/anthology/J/J07/J07-3004.pdf
+
+>>> lex = lexicon.fromstring('''
+... :- S, NP
+... I => NP {I}
+... give => ((S\\NP)/NP)/NP {\\x y z.give(y,x,z)}
+... them => NP {them}
+... money => NP {money}
+... ''',
+... True)
+
+>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
+>>> parses = list(parser.parse("I give them money".split()))
+>>> print(str(len(parses)) + " parses")
+3 parses
+
+>>> printCCGDerivation(parses[0])
+I give them money
+NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
+-------------------------------------------------->
+((S\NP)/NP) {\y z.give(y,them,z)}
+-------------------------------------------------------------->
+(S\NP) {\z.give(money,them,z)}
+----------------------------------------------------------------------<
+S {give(money,them,I)}
+
+>>> printCCGDerivation(parses[1])
+I give them money
+NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
+-------->T
+(S/(S\NP)) {\F.F(I)}
+-------------------------------------------------->
+((S\NP)/NP) {\y z.give(y,them,z)}
+-------------------------------------------------------------->
+(S\NP) {\z.give(money,them,z)}
+---------------------------------------------------------------------->
+S {give(money,them,I)}
+
+
+>>> printCCGDerivation(parses[2])
+I give them money
+NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
+-------->T
+(S/(S\NP)) {\F.F(I)}
+-------------------------------------------------->
+((S\NP)/NP) {\y z.give(y,them,z)}
+---------------------------------------------------------->B
+(S/NP) {\y.give(y,them,I)}
+---------------------------------------------------------------------->
+S {give(money,them,I)}
+
+
+An example from "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", Hockenmaier and Steedman, 2007, Page 359, https://www.aclweb.org/anthology/J/J07/J07-3004.pdf
+
+>>> lex = lexicon.fromstring('''
+... :- N, NP, S
+... money => N {money}
+... that => (N\\N)/(S/NP) {\\P Q x.(P(x) & Q(x))}
+... I => NP {I}
+... give => ((S\\NP)/NP)/NP {\\x y z.give(y,x,z)}
+... them => NP {them}
+... ''',
+... True)
+
+>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
+>>> parses = list(parser.parse("money that I give them".split()))
+>>> print(str(len(parses)) + " parses")
+3 parses
+
+>>> printCCGDerivation(parses[0])
+money that I give them
+N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
+-------->T
+(S/(S\NP)) {\F.F(I)}
+-------------------------------------------------->
+((S\NP)/NP) {\y z.give(y,them,z)}
+---------------------------------------------------------->B
+(S/NP) {\y.give(y,them,I)}
+------------------------------------------------------------------------------------------------->
+(N\N) {\Q x.(give(x,them,I) & Q(x))}
+------------------------------------------------------------------------------------------------------------<
+N {\x.(give(x,them,I) & money(x))}
+
+>>> printCCGDerivation(parses[1])
+money that I give them
+N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
+----------->T
+(N/(N\N)) {\F.F(money)}
+-------->T
+(S/(S\NP)) {\F.F(I)}
+-------------------------------------------------->
+((S\NP)/NP) {\y z.give(y,them,z)}
+---------------------------------------------------------->B
+(S/NP) {\y.give(y,them,I)}
+------------------------------------------------------------------------------------------------->
+(N\N) {\Q x.(give(x,them,I) & Q(x))}
+------------------------------------------------------------------------------------------------------------>
+N {\x.(give(x,them,I) & money(x))}
+
+>>> printCCGDerivation(parses[2])
+money that I give them
+N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
+----------->T
+(N/(N\N)) {\F.F(money)}
+-------------------------------------------------->B
+(N/(S/NP)) {\P x.(P(x) & money(x))}
+-------->T
+(S/(S\NP)) {\F.F(I)}
+-------------------------------------------------->
+((S\NP)/NP) {\y z.give(y,them,z)}
+---------------------------------------------------------->B
+(S/NP) {\y.give(y,them,I)}
+------------------------------------------------------------------------------------------------------------>
+N {\x.(give(x,them,I) & money(x))}
+
+
+-------
+Lexicon
+-------
+
+>>> from nltk.ccg import lexicon
+
+Parse lexicon with semantics
+
+>>> print(str(lexicon.fromstring(
+... '''
+... :- S,NP
+...
+... IntransVsg :: S\\NP[sg]
+...
+... sleeps => IntransVsg {\\x.sleep(x)}
+... eats => S\\NP[sg]/NP {\\x y.eat(x,y)}
+...
+... and => var\\var/var {\\x y.x & y}
+... ''',
+... True
+... )))
+and => ((_var0\_var0)/_var0) {(\x y.x & y)}
+eats => ((S\NP['sg'])/NP) {\x y.eat(x,y)}
+sleeps => (S\NP['sg']) {\x.sleep(x)}
+
+Parse lexicon without semantics
+
+>>> print(str(lexicon.fromstring(
+... '''
+... :- S,NP
+...
+... IntransVsg :: S\\NP[sg]
+...
+... sleeps => IntransVsg
+... eats => S\\NP[sg]/NP {sem=\\x y.eat(x,y)}
+...
+... and => var\\var/var
+... ''',
+... False
+... )))
+and => ((_var0\_var0)/_var0)
+eats => ((S\NP['sg'])/NP)
+sleeps => (S\NP['sg'])
+
+Semantics are missing
+
+>>> print(str(lexicon.fromstring(
+... '''
+... :- S,NP
+...
+... eats => S\\NP[sg]/NP
+... ''',
+... True
+... )))
+Traceback (most recent call last):
+...
+AssertionError: eats => S\NP[sg]/NP must contain semantics because include_semantics is set to True
+
+
+------------------------------------
+CCG combinator semantics computation
+------------------------------------
+
+>>> from nltk.sem.logic import *
+>>> from nltk.ccg.logic import *
+
+>>> read_expr = Expression.fromstring
+
+Compute semantics from function application
+
+>>> print(str(compute_function_semantics(read_expr(r'\x.P(x)'), read_expr(r'book'))))
+P(book)
+
+>>> print(str(compute_function_semantics(read_expr(r'\P.P(book)'), read_expr(r'read'))))
+read(book)
+
+>>> print(str(compute_function_semantics(read_expr(r'\P.P(book)'), read_expr(r'\x.read(x)'))))
+read(book)
+
+Compute semantics from composition
+
+>>> print(str(compute_composition_semantics(read_expr(r'\x.P(x)'), read_expr(r'\x.Q(x)'))))
+\x.P(Q(x))
+
+>>> print(str(compute_composition_semantics(read_expr(r'\x.P(x)'), read_expr(r'read'))))
+Traceback (most recent call last):
+...
+AssertionError: `read` must be a lambda expression
+
+Compute semantics from substitution
+
+>>> print(str(compute_substitution_semantics(read_expr(r'\x y.P(x,y)'), read_expr(r'\x.Q(x)'))))
+\x.P(x,Q(x))
+
+>>> print(str(compute_substitution_semantics(read_expr(r'\x.P(x)'), read_expr(r'read'))))
+Traceback (most recent call last):
+...
+AssertionError: `\x.P(x)` must be a lambda expression with 2 arguments
+
+Compute type-raise semantics
+
+>>> print(str(compute_type_raised_semantics(read_expr(r'\x.P(x)'))))
+\F x.F(P(x))
+
+>>> print(str(compute_type_raised_semantics(read_expr(r'\x.F(x)'))))
+\F1 x.F1(F(x))
+
+>>> print(str(compute_type_raised_semantics(read_expr(r'\x y z.P(x,y,z)'))))
+\F x y z.F(P(x,y,z))
lib/python3.10/site-packages/nltk/test/childes.doctest
ADDED
@@ -0,0 +1,190 @@
+=======================
+CHILDES Corpus Readers
+=======================
+
+Read the XML version of the CHILDES corpus.
+
+Setup
+=====
+
+>>> from nltk.test.childes_fixt import setup_module
+>>> setup_module()
+
+How to use CHILDESCorpusReader
+==============================
+
+Read the CHILDESCorpusReader class and read the CHILDES corpus saved in
+the nltk_data directory.
+
+>>> import nltk
+>>> from nltk.corpus.reader import CHILDESCorpusReader
+>>> corpus_root = nltk.data.find('corpora/childes/data-xml/Eng-USA-MOR/')
+
+Reading files in the Valian corpus (Valian, 1991).
+
+>>> valian = CHILDESCorpusReader(corpus_root, 'Valian/.*.xml')
+>>> valian.fileids()
+['Valian/01a.xml', 'Valian/01b.xml', 'Valian/02a.xml', 'Valian/02b.xml',...
+
+Count the number of files
+
+>>> len(valian.fileids())
+43
+
+Printing properties of the corpus files.
+
+>>> corpus_data = valian.corpus(valian.fileids())
+>>> print(corpus_data[0]['Lang'])
+eng
+>>> for key in sorted(corpus_data[0].keys()):
+...    print(key, ": ", corpus_data[0][key])
+Corpus : valian
+Date : 1986-03-04
+Id : 01a
+Lang : eng
+Version : 2.0.1
+{http://www.w3.org/2001/XMLSchema-instance}schemaLocation : http://www.talkbank.org/ns/talkbank http://talkbank.org/software/talkbank.xsd
+
+Printing information of participants of the corpus. The most common codes for
+the participants are 'CHI' (target child), 'MOT' (mother), and 'INV' (investigator).
+
+>>> corpus_participants = valian.participants(valian.fileids())
+>>> for this_corpus_participants in corpus_participants[:2]:
+...     for key in sorted(this_corpus_participants.keys()):
+...         dct = this_corpus_participants[key]
+...         print(key, ": ", [(k, dct[k]) for k in sorted(dct.keys())])
+CHI : [('age', 'P2Y1M3D'), ('group', 'normal'), ('id', 'CHI'), ('language', 'eng'), ('role', 'Target_Child'), ('sex', 'female')]
+INV : [('id', 'INV'), ('language', 'eng'), ('role', 'Investigator')]
+MOT : [('id', 'MOT'), ('language', 'eng'), ('role', 'Mother')]
+CHI : [('age', 'P2Y1M12D'), ('group', 'normal'), ('id', 'CHI'), ('language', 'eng'), ('role', 'Target_Child'), ('sex', 'female')]
+INV : [('id', 'INV'), ('language', 'eng'), ('role', 'Investigator')]
+MOT : [('id', 'MOT'), ('language', 'eng'), ('role', 'Mother')]
+
+printing words.
+
+>>> valian.words('Valian/01a.xml')
+['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', ...
+
+printing sentences.
+
+>>> valian.sents('Valian/01a.xml')
+[['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname',
+'and', 'it', 'is', 'March', 'fourth', 'I', 'believe', 'and', 'when',
+'was', "Parent's", 'birthday'], ["Child's"], ['oh', "I'm", 'sorry'],
+["that's", 'okay'], ...
+
+You can specify the participants with the argument *speaker*.
+
+>>> valian.words('Valian/01a.xml',speaker=['INV'])
+['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', ...
+>>> valian.words('Valian/01a.xml',speaker=['MOT'])
+["Child's", "that's", 'okay', 'February', 'first', 'nineteen', ...
+>>> valian.words('Valian/01a.xml',speaker=['CHI'])
+['tape', 'it', 'up', 'and', 'two', 'tape', 'players', 'have',...
+
+
+tagged_words() and tagged_sents() return the usual (word,pos) tuple lists.
+POS tags in the CHILDES are automatically assigned by MOR and POST programs
+(MacWhinney, 2000).
+
+>>> valian.tagged_words('Valian/01a.xml')[:30]
+[('at', 'prep'), ('Parent', 'n:prop'), ("Lastname's", 'n:prop'), ('house', 'n'),
+('with', 'prep'), ('Child', 'n:prop'), ('Lastname', 'n:prop'), ('and', 'coord'),
+('it', 'pro'), ('is', 'v:cop'), ('March', 'n:prop'), ('fourth', 'adj'),
+('I', 'pro:sub'), ('believe', 'v'), ('and', 'coord'), ('when', 'adv:wh'),
+('was', 'v:cop'), ("Parent's", 'n:prop'), ('birthday', 'n'), ("Child's", 'n:prop'),
+('oh', 'co'), ("I'm", 'pro:sub'), ('sorry', 'adj'), ("that's", 'pro:dem'),
+('okay', 'adj'), ('February', 'n:prop'), ('first', 'adj'),
+('nineteen', 'det:num'), ('eighty', 'det:num'), ('four', 'det:num')]
+
+>>> valian.tagged_sents('Valian/01a.xml')[:10]
+[[('at', 'prep'), ('Parent', 'n:prop'), ("Lastname's", 'n:prop'), ('house', 'n'),
+('with', 'prep'), ('Child', 'n:prop'), ('Lastname', 'n:prop'), ('and', 'coord'),
+('it', 'pro'), ('is', 'v:cop'), ('March', 'n:prop'), ('fourth', 'adj'),
+('I', 'pro:sub'), ('believe', 'v'), ('and', 'coord'), ('when', 'adv:wh'),
+('was', 'v:cop'), ("Parent's", 'n:prop'), ('birthday', 'n')],
+[("Child's", 'n:prop')], [('oh', 'co'), ("I'm", 'pro:sub'), ('sorry', 'adj')],
+[("that's", 'pro:dem'), ('okay', 'adj')],
+[('February', 'n:prop'), ('first', 'adj'), ('nineteen', 'det:num'),
+('eighty', 'det:num'), ('four', 'det:num')],
+[('great', 'adj')],
+[('and', 'coord'), ("she's", 'pro:sub'), ('two', 'det:num'), ('years', 'n'), ('old', 'adj')],
+[('correct', 'adj')],
+[('okay', 'co')], [('she', 'pro:sub'), ('just', 'adv:int'), ('turned', 'part'), ('two', 'det:num'),
+('a', 'det'), ('month', 'n'), ('ago', 'adv')]]
+
+When the argument *stem* is true, the word stems (e.g., 'is' -> 'be-3PS') are
+used instead of the original words.
+
+>>> valian.words('Valian/01a.xml')[:30]
+['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', 'and', 'it', 'is', ...
+>>> valian.words('Valian/01a.xml',stem=True)[:30]
+['at', 'Parent', 'Lastname', 's', 'house', 'with', 'Child', 'Lastname', 'and', 'it', 'be-3S', ...
+
+When the argument *replace* is true, the replaced words are used instead of
+the original words.
+
+>>> valian.words('Valian/01a.xml',speaker='CHI')[247]
+'tikteat'
+>>> valian.words('Valian/01a.xml',speaker='CHI',replace=True)[247]
+'trick'
+
+When the argument *relation* is true, the relational relationships in the
+sentence are returned. See Sagae et al. (2010) for details of the relational
+structure adopted in the CHILDES.
+
+>>> valian.words('Valian/01a.xml',relation=True)[:10]
+[[('at', 'prep', '1|0|ROOT'), ('Parent', 'n', '2|5|VOC'), ('Lastname', 'n', '3|5|MOD'), ('s', 'poss', '4|5|MOD'), ('house', 'n', '5|1|POBJ'), ('with', 'prep', '6|1|JCT'), ('Child', 'n', '7|8|NAME'), ('Lastname', 'n', '8|6|POBJ'), ('and', 'coord', '9|8|COORD'), ('it', 'pro', '10|11|SUBJ'), ('be-3S', 'v', '11|9|COMP'), ('March', 'n', '12|11|PRED'), ('fourth', 'adj', '13|12|MOD'), ('I', 'pro', '15|16|SUBJ'), ('believe', 'v', '16|14|ROOT'), ('and', 'coord', '18|17|ROOT'), ('when', 'adv', '19|20|PRED'), ('be-PAST', 'v', '20|18|COMP'), ('Parent', 'n', '21|23|MOD'), ('s', 'poss', '22|23|MOD'), ('birth', 'n', '23|20|SUBJ')], [('Child', 'n', '1|2|MOD'), ('s', 'poss', '2|0|ROOT')], [('oh', 'co', '1|4|COM'), ('I', 'pro', '3|4|SUBJ'), ('be', 'v', '4|0|ROOT'), ('sorry', 'adj', '5|4|PRED')], [('that', 'pro', '1|2|SUBJ'), ('be', 'v', '2|0|ROOT'), ('okay', 'adj', '3|2|PRED')], [('February', 'n', '1|6|VOC'), ('first', 'adj', '2|6|ENUM'), ('nineteen', 'det', '4|6|ENUM'), ('eighty', 'det', '5|6|ENUM'), ('four', 'det', '6|0|ROOT')], [('great', 'adj', '1|0|ROOT')], [('and', 'coord', '1|0|ROOT'), ('she', 'pro', '2|1|ROOT'), ('be', 'aux', '3|5|AUX'), ('two', 'det', '4|5|QUANT'), ('year-PL', 'n', '5|2|ROOT'), ('old', 'adj', '6|5|MOD')], [('correct', 'adj', '1|0|ROOT')], [('okay', 'co', '1|0|ROOT')], [('she', 'pro', '1|0|ROOT'), ('just', 'adv', '2|3|JCT'), ('turn-PERF', 'part', '3|1|XCOMP'), ('two', 'det', '4|6|QUANT'), ('a', 'det', '5|6|DET'), ('month', 'n', '6|3|OBJ'), ('ago', 'adv', '7|3|JCT')]]
+
+Printing age. When the argument *month* is true, the age information in
+the CHILDES format is converted into the number of months.
+
+>>> valian.age()
+['P2Y1M3D', 'P2Y1M12D', 'P1Y9M21D', 'P1Y9M28D', 'P2Y1M23D', ...
+>>> valian.age('Valian/01a.xml')
+['P2Y1M3D']
+>>> valian.age('Valian/01a.xml',month=True)
+[25]
+
+Printing MLU. The criteria for the MLU computation is broadly based on
+Brown (1973).
+
+>>> valian.MLU()
+[2.3574660633484..., 2.292682926829..., 3.492857142857..., 2.961783439490...,
+2.0842696629213..., 3.169811320754..., 3.137404580152..., 3.0578034682080...,
+4.090163934426..., 3.488372093023..., 2.8773584905660..., 3.4792899408284...,
+4.0111940298507..., 3.456790123456..., 4.487603305785..., 4.007936507936...,
+5.25, 5.154696132596..., ...]
+
+>>> valian.MLU('Valian/01a.xml')
+[2.35746606334...]
+
+
+Basic stuff
+==============================
+
+Count the number of words and sentences of each file.
+
+>>> valian = CHILDESCorpusReader(corpus_root, 'Valian/.*.xml')
+>>> for this_file in valian.fileids()[:6]:
+...     print(valian.corpus(this_file)[0]['Corpus'], valian.corpus(this_file)[0]['Id'])
+...     print("num of words: %i" % len(valian.words(this_file)))
+...     print("num of sents: %i" % len(valian.sents(this_file)))
+valian 01a
+num of words: 3606
+num of sents: 1027
+valian 01b
+num of words: 4376
+num of sents: 1274
+valian 02a
+num of words: 2673
+num of sents: 801
+valian 02b
+num of words: 5020
+num of sents: 1583
+valian 03a
+num of words: 2743
+num of sents: 988
+valian 03b
+num of words: 4409
+num of sents: 1397
lib/python3.10/site-packages/nltk/test/chunk.doctest
ADDED
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
==========
|
5 |
+
Chunking
|
6 |
+
==========
|
7 |
+
|
8 |
+
>>> from nltk.chunk import *
|
9 |
+
>>> from nltk.chunk.util import *
|
10 |
+
>>> from nltk.chunk.regexp import *
|
11 |
+
>>> from nltk import Tree
|
12 |
+
|
13 |
+
>>> tagged_text = "[ The/DT cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] [ the/DT dog/NN ] chewed/VBD ./."
|
14 |
+
>>> gold_chunked_text = tagstr2tree(tagged_text)
|
15 |
+
>>> unchunked_text = gold_chunked_text.flatten()
|
16 |
+
|
17 |
+
Chunking uses a special regexp syntax for rules that delimit the chunks. These
|
18 |
+
rules must be converted to 'regular' regular expressions before a sentence can
|
19 |
+
be chunked.
|
20 |
+
|
21 |
+
>>> tag_pattern = "<DT>?<JJ>*<NN.*>"
|
22 |
+
>>> regexp_pattern = tag_pattern2re_pattern(tag_pattern)
|
23 |
+
>>> regexp_pattern
|
24 |
+
'(<(DT)>)?(<(JJ)>)*(<(NN[^\\{\\}<>]*)>)'
|
25 |
+
|
26 |
+
Construct some new chunking rules.
|
27 |
+
|
28 |
+
>>> chunk_rule = ChunkRule(r"<.*>+", "Chunk everything")
|
29 |
+
>>> strip_rule = StripRule(r"<VBD|IN|\.>", "Strip on verbs/prepositions")
|
30 |
+
>>> split_rule = SplitRule("<DT><NN>", "<DT><NN>",
|
31 |
+
... "Split successive determiner/noun pairs")
|
32 |
+
|
33 |
+
|
34 |
+
Create and score a series of chunk parsers, successively more complex.
|
35 |
+
|
36 |
+
>>> chunk_parser = RegexpChunkParser([chunk_rule], chunk_label='NP')
|
37 |
+
>>> chunked_text = chunk_parser.parse(unchunked_text)
|
38 |
+
>>> print(chunked_text)
|
39 |
+
(S
|
40 |
+
(NP
|
41 |
+
The/DT
|
42 |
+
cat/NN
|
43 |
+
sat/VBD
|
44 |
+
on/IN
|
45 |
+
the/DT
|
46 |
+
mat/NN
|
47 |
+
the/DT
|
48 |
+
dog/NN
|
49 |
+
chewed/VBD
|
50 |
+
./.))
|
51 |
+
|
52 |
+
>>> chunkscore = ChunkScore()
|
53 |
+
>>> chunkscore.score(gold_chunked_text, chunked_text)
|
54 |
+
>>> print(chunkscore.precision())
|
55 |
+
0.0
|
56 |
+
|
57 |
+
>>> print(chunkscore.recall())
|
58 |
+
0.0
|
59 |
+
|
60 |
+
>>> print(chunkscore.f_measure())
|
61 |
+
0
|
62 |
+
|
63 |
+
>>> for chunk in sorted(chunkscore.missed()): print(chunk)
|
64 |
+
(NP The/DT cat/NN)
|
65 |
+
(NP the/DT dog/NN)
|
66 |
+
(NP the/DT mat/NN)
|
67 |
+
|
68 |
+
>>> for chunk in chunkscore.incorrect(): print(chunk)
|
69 |
+
(NP
|
70 |
+
The/DT
|
71 |
+
cat/NN
|
72 |
+
sat/VBD
|
73 |
+
on/IN
|
74 |
+
the/DT
|
75 |
+
mat/NN
|
76 |
+
the/DT
|
77 |
+
dog/NN
|
78 |
+
chewed/VBD
|
79 |
+
./.)
|
80 |
+
|
81 |
+
>>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule],
|
82 |
+
... chunk_label='NP')
|
83 |
+
>>> chunked_text = chunk_parser.parse(unchunked_text)
|
84 |
+
>>> print(chunked_text)
|
85 |
+
(S
|
86 |
+
(NP The/DT cat/NN)
|
87 |
+
sat/VBD
|
88 |
+
on/IN
|
89 |
+
(NP the/DT mat/NN the/DT dog/NN)
|
90 |
+
chewed/VBD
|
91 |
+
./.)
|
92 |
+
>>> assert chunked_text == chunk_parser.parse(list(unchunked_text))
|
93 |
+
|
94 |
+
>>> chunkscore = ChunkScore()
|
95 |
+
>>> chunkscore.score(gold_chunked_text, chunked_text)
|
96 |
+
>>> chunkscore.precision()
|
97 |
+
0.5
|
98 |
+
|
99 |
+
>>> print(chunkscore.recall())
|
100 |
+
0.33333333...
|
101 |
+
|
102 |
+
>>> print(chunkscore.f_measure())
|
103 |
+
0.4
|
104 |
+
|
105 |
+
>>> for chunk in sorted(chunkscore.missed()): print(chunk)
|
106 |
+
(NP the/DT dog/NN)
|
107 |
+
(NP the/DT mat/NN)
|
108 |
+
|
109 |
+
>>> for chunk in chunkscore.incorrect(): print(chunk)
|
110 |
+
(NP the/DT mat/NN the/DT dog/NN)
|
111 |
+
|
112 |
+
>>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule, split_rule],
...                                  chunk_label='NP')
>>> chunked_text = chunk_parser.parse(unchunked_text, trace=True)
# Input:
 <DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>
# Chunk everything:
{<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>}
# Strip on verbs/prepositions:
{<DT> <NN>} <VBD> <IN> {<DT> <NN> <DT> <NN>} <VBD> <.>
# Split successive determiner/noun pairs:
{<DT> <NN>} <VBD> <IN> {<DT> <NN>}{<DT> <NN>} <VBD> <.>
>>> print(chunked_text)
(S
  (NP The/DT cat/NN)
  sat/VBD
  on/IN
  (NP the/DT mat/NN)
  (NP the/DT dog/NN)
  chewed/VBD
  ./.)

>>> chunkscore = ChunkScore()
>>> chunkscore.score(gold_chunked_text, chunked_text)
>>> chunkscore.precision()
1.0

>>> chunkscore.recall()
1.0

>>> chunkscore.f_measure()
1.0

>>> chunkscore.missed()
[]

>>> chunkscore.incorrect()
[]

>>> chunk_parser.rules()
[<ChunkRule: '<.*>+'>, <StripRule: '<VBD|IN|\\.>'>,
 <SplitRule: '<DT><NN>', '<DT><NN>'>]

Printing parsers:

>>> print(repr(chunk_parser))
<RegexpChunkParser with 3 rules>
>>> print(chunk_parser)
RegexpChunkParser with 3 rules:
    Chunk everything
      <ChunkRule: '<.*>+'>
    Strip on verbs/prepositions
      <StripRule: '<VBD|IN|\\.>'>
    Split successive determiner/noun pairs
      <SplitRule: '<DT><NN>', '<DT><NN>'>

Regression Tests
~~~~~~~~~~~~~~~~
ChunkParserI
------------
`ChunkParserI` is an abstract interface -- it is not meant to be
instantiated directly.

>>> ChunkParserI().parse([])
Traceback (most recent call last):
  . . .
NotImplementedError


ChunkString
-----------
ChunkString can be built from a tree of tagged tuples, a tree of
trees, or a mixed list of both:

>>> t1 = Tree('S', [('w%d' % i, 't%d' % i) for i in range(10)])
>>> t2 = Tree('S', [Tree('t0', []), Tree('t1', ['c1'])])
>>> t3 = Tree('S', [('w0', 't0'), Tree('t1', ['c1'])])
>>> ChunkString(t1)
<ChunkString: '<t0><t1><t2><t3><t4><t5><t6><t7><t8><t9>'>
>>> ChunkString(t2)
<ChunkString: '<t0><t1>'>
>>> ChunkString(t3)
<ChunkString: '<t0><t1>'>

Other values generate an error:

>>> ChunkString(Tree('S', ['x']))
Traceback (most recent call last):
  . . .
ValueError: chunk structures must contain tagged tokens or trees

The `str()` for a chunk string adds spaces to it, which makes it line
up with `str()` output for other chunk strings over the same
underlying input.

>>> cs = ChunkString(t1)
>>> print(cs)
<t0> <t1> <t2> <t3> <t4> <t5> <t6> <t7> <t8> <t9>
>>> cs.xform('<t3>', '{<t3>}')
>>> print(cs)
<t0> <t1> <t2> {<t3>} <t4> <t5> <t6> <t7> <t8> <t9>

The `_verify()` method makes sure that our transforms don't corrupt
the chunk string. By setting debug_level=2, `_verify()` will be
called at the end of every call to `xform`.

>>> cs = ChunkString(t1, debug_level=3)

>>> # tag not marked with <...>:
>>> cs.xform('<t3>', 't3')
Traceback (most recent call last):
  . . .
ValueError: Transformation generated invalid chunkstring:
  <t0><t1><t2>t3<t4><t5><t6><t7><t8><t9>

>>> # brackets not balanced:
>>> cs.xform('<t3>', '{<t3>')
Traceback (most recent call last):
  . . .
ValueError: Transformation generated invalid chunkstring:
  <t0><t1><t2>{<t3><t4><t5><t6><t7><t8><t9>

>>> # nested brackets:
>>> cs.xform('<t3><t4><t5>', '{<t3>{<t4>}<t5>}')
Traceback (most recent call last):
  . . .
ValueError: Transformation generated invalid chunkstring:
  <t0><t1><t2>{<t3>{<t4>}<t5>}<t6><t7><t8><t9>

>>> # modified tags:
>>> cs.xform('<t3>', '<t9>')
Traceback (most recent call last):
  . . .
ValueError: Transformation generated invalid chunkstring: tag changed

>>> # added tags:
>>> cs.xform('<t9>', '<t9><t10>')
Traceback (most recent call last):
  . . .
ValueError: Transformation generated invalid chunkstring: tag changed

Chunking Rules
--------------

Test the different rule constructors & __repr__ methods:

>>> r1 = RegexpChunkRule('<a|b>'+ChunkString.IN_STRIP_PATTERN,
...                      '{<a|b>}', 'chunk <a> and <b>')
>>> r2 = RegexpChunkRule(re.compile('<a|b>'+ChunkString.IN_STRIP_PATTERN),
...                      '{<a|b>}', 'chunk <a> and <b>')
>>> r3 = ChunkRule('<a|b>', 'chunk <a> and <b>')
>>> r4 = StripRule('<a|b>', 'strip <a> and <b>')
>>> r5 = UnChunkRule('<a|b>', 'unchunk <a> and <b>')
>>> r6 = MergeRule('<a>', '<b>', 'merge <a> w/ <b>')
>>> r7 = SplitRule('<a>', '<b>', 'split <a> from <b>')
>>> r8 = ExpandLeftRule('<a>', '<b>', 'expand left <a> <b>')
>>> r9 = ExpandRightRule('<a>', '<b>', 'expand right <a> <b>')
>>> for rule in r1, r2, r3, r4, r5, r6, r7, r8, r9:
...     print(rule)
<RegexpChunkRule: '<a|b>(?=[^\\}]*(\\{|$))'->'{<a|b>}'>
<RegexpChunkRule: '<a|b>(?=[^\\}]*(\\{|$))'->'{<a|b>}'>
<ChunkRule: '<a|b>'>
<StripRule: '<a|b>'>
<UnChunkRule: '<a|b>'>
<MergeRule: '<a>', '<b>'>
<SplitRule: '<a>', '<b>'>
<ExpandLeftRule: '<a>', '<b>'>
<ExpandRightRule: '<a>', '<b>'>

`tag_pattern2re_pattern()` complains if the tag pattern looks problematic:

>>> tag_pattern2re_pattern('{}')
Traceback (most recent call last):
  . . .
ValueError: Bad tag pattern: '{}'

RegexpChunkParser
-----------------

A warning is printed when parsing an empty sentence:

>>> parser = RegexpChunkParser([ChunkRule('<a>', '')])
>>> parser.parse(Tree('S', []))
Warning: parsing empty text
Tree('S', [])

RegexpParser
------------

>>> parser = RegexpParser('''
... NP: {<DT>? <JJ>* <NN>*}   # NP
... P: {<IN>}                 # Preposition
... V: {<V.*>}                # Verb
... PP: {<P> <NP>}            # PP -> P NP
... VP: {<V> <NP|PP>*}        # VP -> V (NP|PP)*
... ''')
>>> print(repr(parser))
<chunk.RegexpParser with 5 stages>
>>> print(parser)
chunk.RegexpParser with 5 stages:
    RegexpChunkParser with 1 rules:
        NP   <ChunkRule: '<DT>? <JJ>* <NN>*'>
    RegexpChunkParser with 1 rules:
        Preposition   <ChunkRule: '<IN>'>
    RegexpChunkParser with 1 rules:
        Verb   <ChunkRule: '<V.*>'>
    RegexpChunkParser with 1 rules:
        PP -> P NP   <ChunkRule: '<P> <NP>'>
    RegexpChunkParser with 1 rules:
        VP -> V (NP|PP)*   <ChunkRule: '<V> <NP|PP>*'>
>>> print(parser.parse(unchunked_text, trace=True))
# Input:
 <DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>
# NP:
{<DT> <NN>} <VBD> <IN> {<DT> <NN>}{<DT> <NN>} <VBD> <.>
# Input:
 <NP> <VBD> <IN> <NP> <NP> <VBD> <.>
# Preposition:
 <NP> <VBD> {<IN>} <NP> <NP> <VBD> <.>
# Input:
 <NP> <VBD> <P> <NP> <NP> <VBD> <.>
# Verb:
 <NP> {<VBD>} <P> <NP> <NP> {<VBD>} <.>
# Input:
 <NP> <V> <P> <NP> <NP> <V> <.>
# PP -> P NP:
 <NP> <V> {<P> <NP>} <NP> <V> <.>
# Input:
 <NP> <V> <PP> <NP> <V> <.>
# VP -> V (NP|PP)*:
 <NP> {<V> <PP> <NP>}{<V>} <.>
(S
  (NP The/DT cat/NN)
  (VP
    (V sat/VBD)
    (PP (P on/IN) (NP the/DT mat/NN))
    (NP the/DT dog/NN))
  (VP (V chewed/VBD))
  ./.)

Test parsing of other rule types:

>>> print(RegexpParser('''
... X:
...   }<a><b>{     # strip rule
...   <a>}{<b>     # split rule
...   <a>{}<b>     # merge rule
...   <a>{<b>}<c>  # chunk rule w/ context
... '''))
chunk.RegexpParser with 1 stages:
    RegexpChunkParser with 4 rules:
        strip rule   <StripRule: '<a><b>'>
        split rule   <SplitRule: '<a>', '<b>'>
        merge rule   <MergeRule: '<a>', '<b>'>
        chunk rule w/ context   <ChunkRuleWithContext: '<a>', '<b>', '<c>'>

Illegal patterns give an error message:

>>> print(RegexpParser('X: {<foo>} {<bar>}'))
Traceback (most recent call last):
  . . .
ValueError: Illegal chunk pattern: {<foo>} {<bar>}
lib/python3.10/site-packages/nltk/test/classify.doctest
ADDED
@@ -0,0 +1,202 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

=============
 Classifiers
=============

>>> from nltk.test.classify_fixt import setup_module
>>> setup_module()

Classifiers label tokens with category labels (or *class labels*).
Typically, labels are represented with strings (such as ``"health"``
or ``"sports"``). In NLTK, classifiers are defined using classes that
implement the `ClassifierI` interface, which supports the following operations:

- self.classify(featureset)
- self.classify_many(featuresets)
- self.labels()
- self.prob_classify(featureset)
- self.prob_classify_many(featuresets)

NLTK defines several classifier classes:

- `ConditionalExponentialClassifier`
- `DecisionTreeClassifier`
- `MaxentClassifier`
- `NaiveBayesClassifier`
- `WekaClassifier`

Classifiers are typically created by training them on a training
corpus.
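
Since the interface is uniform across classifiers, a typical workflow is to
build (featureset, label) pairs, call ``train()``, and then classify fresh
featuresets. A minimal sketch (the toy feature dicts below are invented for
illustration; ``nltk.classify.accuracy`` is the stock helper for scoring
against labeled data)::

    import nltk

    # Toy labeled data: each item is a (featureset, label) pair.
    labeled = [({'len': 3, 'vowel': 1}, 'short'),
               ({'len': 12, 'vowel': 0}, 'long'),
               ({'len': 2, 'vowel': 1}, 'short'),
               ({'len': 15, 'vowel': 1}, 'long')]

    classifier = nltk.classify.NaiveBayesClassifier.train(labeled)
    print(classifier.classify({'len': 4, 'vowel': 1}))

    # Evaluate against held-out (featureset, label) pairs.
    print(nltk.classify.accuracy(classifier, labeled))
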
Regression Tests
~~~~~~~~~~~~~~~~

We define a very simple training corpus with 3 binary features: ['a',
'b', 'c'], and two labels: ['x', 'y']. We use a simple feature set so
that the correct answers can be calculated analytically (although we
haven't done this yet for all tests).

>>> import nltk
>>> train = [
...     (dict(a=1,b=1,c=1), 'y'),
...     (dict(a=1,b=1,c=1), 'x'),
...     (dict(a=1,b=1,c=0), 'y'),
...     (dict(a=0,b=1,c=1), 'x'),
...     (dict(a=0,b=1,c=1), 'y'),
...     (dict(a=0,b=0,c=1), 'y'),
...     (dict(a=0,b=1,c=0), 'x'),
...     (dict(a=0,b=0,c=0), 'x'),
...     (dict(a=0,b=1,c=1), 'y'),
...     (dict(a=None,b=1,c=0), 'x'),
...     ]
>>> test = [
...     (dict(a=1,b=0,c=1)), # unseen
...     (dict(a=1,b=0,c=0)), # unseen
...     (dict(a=0,b=1,c=1)), # seen 3 times, labels=y,y,x
...     (dict(a=0,b=1,c=0)), # seen 1 time, label=x
...     ]

Test the Naive Bayes classifier:

>>> classifier = nltk.classify.NaiveBayesClassifier.train(train)
>>> sorted(classifier.labels())
['x', 'y']
>>> classifier.classify_many(test)
['y', 'x', 'y', 'x']
>>> for pdist in classifier.prob_classify_many(test):
...     print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y')))
0.2500 0.7500
0.5833 0.4167
0.3571 0.6429
0.7000 0.3000
>>> classifier.show_most_informative_features()
Most Informative Features
                       c = 0                   x : y      =      2.3 : 1.0
                       c = 1                   y : x      =      1.8 : 1.0
                       a = 1                   y : x      =      1.7 : 1.0
                       a = 0                   x : y      =      1.0 : 1.0
                       b = 0                   x : y      =      1.0 : 1.0
                       b = 1                   x : y      =      1.0 : 1.0

Test the Decision Tree classifier (without None):

>>> classifier = nltk.classify.DecisionTreeClassifier.train(
...     train[:-1], entropy_cutoff=0,
...     support_cutoff=0)
>>> sorted(classifier.labels())
['x', 'y']
>>> print(classifier)
c=0? .................................................. x
  a=0? ................................................ x
  a=1? ................................................ y
c=1? .................................................. y
<BLANKLINE>
>>> classifier.classify_many(test)
['y', 'y', 'y', 'x']
>>> for pdist in classifier.prob_classify_many(test):
...     print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y')))
Traceback (most recent call last):
  . . .
NotImplementedError


Test the Decision Tree classifier (with None):

>>> classifier = nltk.classify.DecisionTreeClassifier.train(
...     train, entropy_cutoff=0,
...     support_cutoff=0)
>>> sorted(classifier.labels())
['x', 'y']
>>> print(classifier)
c=0? .................................................. x
  a=0? ................................................ x
  a=1? ................................................ y
  a=None? ............................................. x
c=1? .................................................. y
<BLANKLINE>


Test SklearnClassifier, which requires the scikit-learn package.

>>> from nltk.classify import SklearnClassifier
>>> from sklearn.naive_bayes import BernoulliNB
>>> from sklearn.svm import SVC
>>> train_data = [({"a": 4, "b": 1, "c": 0}, "ham"),
...               ({"a": 5, "b": 2, "c": 1}, "ham"),
...               ({"a": 0, "b": 3, "c": 4}, "spam"),
...               ({"a": 5, "b": 1, "c": 1}, "ham"),
...               ({"a": 1, "b": 4, "c": 3}, "spam")]
>>> classif = SklearnClassifier(BernoulliNB()).train(train_data)
>>> test_data = [{"a": 3, "b": 2, "c": 1},
...              {"a": 0, "b": 3, "c": 7}]
>>> classif.classify_many(test_data)
['ham', 'spam']
>>> classif = SklearnClassifier(SVC(), sparse=False).train(train_data)
>>> classif.classify_many(test_data)
['ham', 'spam']
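
``SklearnClassifier`` accepts any scikit-learn estimator with a
``fit``/``predict`` interface, so a full ``Pipeline`` can be trained through
the same wrapper. A sketch along the lines of the example in
``nltk.classify.scikitlearn`` (the particular stages chosen here are
illustrative)::

    from sklearn.feature_selection import SelectKBest, chi2
    from sklearn.naive_bayes import MultinomialNB
    from sklearn.pipeline import Pipeline

    # Feature selection followed by a classifier, trained as one unit.
    pipeline = Pipeline([('chi2', SelectKBest(chi2, k=2)),
                         ('nb', MultinomialNB())])
    classif = SklearnClassifier(pipeline).train(train_data)
    print(classif.classify_many(test_data))
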

Test the Maximum Entropy classifier training algorithms; they should all
generate the same results.

>>> def print_maxent_test_header():
...     print(' '*11+''.join(['      test[%s]  ' % i
...                           for i in range(len(test))]))
...     print(' '*11+'  p(x)  p(y)'*len(test))
...     print('-'*(11+15*len(test)))

>>> def test_maxent(algorithm):
...     print('%11s' % algorithm, end=' ')
...     try:
...         classifier = nltk.classify.MaxentClassifier.train(
...             train, algorithm, trace=0, max_iter=1000)
...     except Exception as e:
...         print('Error: %r' % e)
...         return
...
...     for featureset in test:
...         pdist = classifier.prob_classify(featureset)
...         print('%8.2f%6.2f' % (pdist.prob('x'), pdist.prob('y')), end=' ')
...     print()

>>> print_maxent_test_header(); test_maxent('GIS'); test_maxent('IIS')
                 test[0]        test[1]        test[2]        test[3]
             p(x)  p(y)  p(x)  p(y)  p(x)  p(y)  p(x)  p(y)
-----------------------------------------------------------------------
        GIS     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24
        IIS     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24

>>> test_maxent('MEGAM'); test_maxent('TADM') # doctest: +SKIP
      MEGAM     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24
       TADM     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24



Regression tests for TypedMaxentFeatureEncoding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

>>> from nltk.classify import maxent
>>> train = [
...     ({'a': 1, 'b': 1, 'c': 1}, 'y'),
...     ({'a': 5, 'b': 5, 'c': 5}, 'x'),
...     ({'a': 0.9, 'b': 0.9, 'c': 0.9}, 'y'),
...     ({'a': 5.5, 'b': 5.4, 'c': 5.3}, 'x'),
...     ({'a': 0.8, 'b': 1.2, 'c': 1}, 'y'),
...     ({'a': 5.1, 'b': 4.9, 'c': 5.2}, 'x')
... ]

>>> test = [
...     {'a': 1, 'b': 0.8, 'c': 1.2},
...     {'a': 5.2, 'b': 5.1, 'c': 5}
... ]

>>> encoding = maxent.TypedMaxentFeatureEncoding.train(
...     train, count_cutoff=3, alwayson_features=True)

>>> classifier = maxent.MaxentClassifier.train(
...     train, bernoulli=False, encoding=encoding, trace=0)

>>> classifier.classify_many(test)
['y', 'x']
lib/python3.10/site-packages/nltk/test/collections.doctest
ADDED
@@ -0,0 +1,31 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

===========
Collections
===========

>>> import nltk
>>> from nltk.collections import *

Trie
----

Trie can be pickled:

>>> import pickle
>>> trie = nltk.collections.Trie(['a'])
>>> s = pickle.dumps(trie)
>>> pickle.loads(s)
{'a': {True: None}}
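
The pickle round-trip above also exposes the underlying representation: a
``Trie`` is a nested dict in which each level maps one character to the next
level, and the key ``True`` marks the end of an inserted string. A small
sketch of walking that structure by hand (the ``contains`` helper is my own
illustration, not part of the NLTK API)::

    from nltk.collections import Trie

    trie = Trie(['cat', 'cab'])

    def contains(trie, word):
        # Follow one dict level per character; True marks a stored word's end.
        node = trie
        for ch in word:
            if ch not in node:
                return False
            node = node[ch]
        return True in node

    print(contains(trie, 'cat'))   # True
    print(contains(trie, 'ca'))    # False: a prefix only, no terminator key
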
LazyIteratorList
----------------

Fetching the length of a LazyIteratorList object does not throw a StopIteration exception:

>>> lil = LazyIteratorList(i for i in range(1, 11))
>>> lil[-1]
10
>>> len(lil)
10
lib/python3.10/site-packages/nltk/test/collocations.doctest
ADDED
@@ -0,0 +1,307 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==============
 Collocations
==============

Overview
~~~~~~~~

Collocations are expressions of multiple words which commonly co-occur. For
example, the top ten bigram collocations in Genesis are listed below, as
measured using Pointwise Mutual Information.

>>> import nltk
>>> from nltk.collocations import *
>>> bigram_measures = nltk.collocations.BigramAssocMeasures()
>>> trigram_measures = nltk.collocations.TrigramAssocMeasures()
>>> fourgram_measures = nltk.collocations.QuadgramAssocMeasures()
>>> finder = BigramCollocationFinder.from_words(
...     nltk.corpus.genesis.words('english-web.txt'))
>>> finder.nbest(bigram_measures.pmi, 10)
[('Allon', 'Bacuth'), ('Ashteroth', 'Karnaim'), ('Ben', 'Ammi'),
 ('En', 'Mishpat'), ('Jegar', 'Sahadutha'), ('Salt', 'Sea'),
 ('Whoever', 'sheds'), ('appoint', 'overseers'), ('aromatic', 'resin'),
 ('cutting', 'instrument')]

While these words are highly collocated, the expressions are also very
infrequent. Therefore it is useful to apply filters, such as ignoring all
bigrams which occur less than three times in the corpus:

>>> finder.apply_freq_filter(3)
>>> finder.nbest(bigram_measures.pmi, 10)
[('Beer', 'Lahai'), ('Lahai', 'Roi'), ('gray', 'hairs'),
 ('ewe', 'lambs'), ('Most', 'High'), ('many', 'colors'),
 ('burnt', 'offering'), ('Paddan', 'Aram'), ('east', 'wind'),
 ('living', 'creature')]

We may similarly find collocations among tagged words:

>>> finder = BigramCollocationFinder.from_words(
...     nltk.corpus.brown.tagged_words('ca01', tagset='universal'))
>>> finder.nbest(bigram_measures.pmi, 5)
[(('1,119', 'NUM'), ('votes', 'NOUN')),
 (('1962', 'NUM'), ("governor's", 'NOUN')),
 (('637', 'NUM'), ('E.', 'NOUN')),
 (('Alpharetta', 'NOUN'), ('prison', 'NOUN')),
 (('Bar', 'NOUN'), ('Association', 'NOUN'))]

Or tags alone:

>>> finder = BigramCollocationFinder.from_words(t for w, t in
...     nltk.corpus.brown.tagged_words('ca01', tagset='universal'))
>>> finder.nbest(bigram_measures.pmi, 10)
[('PRT', 'VERB'), ('PRON', 'VERB'), ('ADP', 'DET'), ('.', 'PRON'), ('DET', 'ADJ'),
 ('CONJ', 'PRON'), ('ADP', 'NUM'), ('NUM', '.'), ('ADV', 'ADV'), ('VERB', 'ADV')]

Or spanning intervening words:

>>> finder = BigramCollocationFinder.from_words(
...     nltk.corpus.genesis.words('english-web.txt'),
...     window_size = 20)
>>> finder.apply_freq_filter(2)
>>> ignored_words = nltk.corpus.stopwords.words('english')
>>> finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
>>> finder.nbest(bigram_measures.likelihood_ratio, 10)
[('chief', 'chief'), ('became', 'father'), ('years', 'became'),
 ('hundred', 'years'), ('lived', 'became'), ('king', 'king'),
 ('lived', 'years'), ('became', 'became'), ('chief', 'chiefs'),
 ('hundred', 'became')]

Finders
~~~~~~~

The collocations package provides collocation finders which by default
consider all ngrams in a text as candidate collocations:

>>> text = "I do not like green eggs and ham, I do not like them Sam I am!"
>>> tokens = nltk.wordpunct_tokenize(text)
>>> finder = BigramCollocationFinder.from_words(tokens)
>>> scored = finder.score_ngrams(bigram_measures.raw_freq)
>>> sorted(bigram for bigram, score in scored)
[(',', 'I'), ('I', 'am'), ('I', 'do'), ('Sam', 'I'), ('am', '!'),
 ('and', 'ham'), ('do', 'not'), ('eggs', 'and'), ('green', 'eggs'),
 ('ham', ','), ('like', 'green'), ('like', 'them'), ('not', 'like'),
 ('them', 'Sam')]

We could otherwise construct the collocation finder from manually-derived
FreqDists:

>>> word_fd = nltk.FreqDist(tokens)
>>> bigram_fd = nltk.FreqDist(nltk.bigrams(tokens))
>>> finder = BigramCollocationFinder(word_fd, bigram_fd)
>>> scored == finder.score_ngrams(bigram_measures.raw_freq)
True

A similar interface is provided for trigrams:

>>> finder = TrigramCollocationFinder.from_words(tokens)
>>> scored = finder.score_ngrams(trigram_measures.raw_freq)
>>> set(trigram for trigram, score in scored) == set(nltk.trigrams(tokens))
True

We may want to select only the top n results:

>>> sorted(finder.nbest(trigram_measures.raw_freq, 2))
[('I', 'do', 'not'), ('do', 'not', 'like')]

Alternatively, we can select those above a minimum score value:

>>> sorted(finder.above_score(trigram_measures.raw_freq,
...                           1.0 / len(tuple(nltk.trigrams(tokens)))))
[('I', 'do', 'not'), ('do', 'not', 'like')]

Now spanning intervening words:

>>> finder = TrigramCollocationFinder.from_words(tokens)
>>> finder = TrigramCollocationFinder.from_words(tokens, window_size=4)
>>> sorted(finder.nbest(trigram_measures.raw_freq, 4))
[('I', 'do', 'like'), ('I', 'do', 'not'), ('I', 'not', 'like'), ('do', 'not', 'like')]

A closer look at the finder's ngram frequencies:

>>> sorted(finder.ngram_fd.items(), key=lambda t: (-t[1], t[0]))[:10]
[(('I', 'do', 'like'), 2), (('I', 'do', 'not'), 2), (('I', 'not', 'like'), 2),
 (('do', 'not', 'like'), 2), ((',', 'I', 'do'), 1), ((',', 'I', 'not'), 1),
 ((',', 'do', 'not'), 1), (('I', 'am', '!'), 1), (('Sam', 'I', '!'), 1),
 (('Sam', 'I', 'am'), 1)]

A similar interface is provided for fourgrams:

>>> finder_4grams = QuadgramCollocationFinder.from_words(tokens)
>>> scored_4grams = finder_4grams.score_ngrams(fourgram_measures.raw_freq)
>>> set(fourgram for fourgram, score in scored_4grams) == set(nltk.ngrams(tokens, n=4))
True

Filtering candidates
~~~~~~~~~~~~~~~~~~~~

All the ngrams in a text are often too many to be useful when finding
collocations. It is generally useful to remove some words or punctuation,
and to require a minimum frequency for candidate collocations.

Given our sample text above, if we remove all trigrams containing personal
pronouns from candidature, score_ngrams should return 6 fewer results, and
'do not like' will be the only candidate which occurs more than once:

>>> finder = TrigramCollocationFinder.from_words(tokens)
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
14
>>> finder.apply_word_filter(lambda w: w in ('I', 'me'))
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
8
>>> sorted(finder.above_score(trigram_measures.raw_freq,
...                           1.0 / len(tuple(nltk.trigrams(tokens)))))
[('do', 'not', 'like')]

Sometimes a filter is a function on the whole ngram, rather than on each
word, such as when we wish to permit 'and' to appear in the middle of a
trigram, but not on either edge:

>>> finder.apply_ngram_filter(lambda w1, w2, w3: 'and' in (w1, w3))
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
6

Finally, it is often important to remove low frequency candidates, as we
lack sufficient evidence about their significance as collocations:

>>> finder.apply_freq_filter(2)
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
1

Association measures
~~~~~~~~~~~~~~~~~~~~

A number of measures are available to score collocations or other associations.
The arguments to measure functions are marginals of a contingency table, in the
bigram case (n_ii, (n_ix, n_xi), n_xx)::

                w1    ~w1
             ------ ------
         w2 | n_ii | n_oi | = n_xi
             ------ ------
        ~w2 | n_io | n_oo |
             ------ ------
             = n_ix        TOTAL = n_xx

We test their calculation using some known values presented in Manning and
Schutze's text and other papers.

Student's t: examples from Manning and Schutze 5.3.2

>>> print('%0.4f' % bigram_measures.student_t(8, (15828, 4675), 14307668))
0.9999
>>> print('%0.4f' % bigram_measures.student_t(20, (42, 20), 14307668))
4.4721

Chi-square: examples from Manning and Schutze 5.3.3

>>> print('%0.2f' % bigram_measures.chi_sq(8, (15828, 4675), 14307668))
1.55
>>> print('%0.0f' % bigram_measures.chi_sq(59, (67, 65), 571007))
456400

Likelihood ratios: examples from Dunning, CL, 1993

>>> print('%0.2f' % bigram_measures.likelihood_ratio(110, (2552, 221), 31777))
270.72
>>> print('%0.2f' % bigram_measures.likelihood_ratio(8, (13, 32), 31777))
95.29

Pointwise Mutual Information: examples from Manning and Schutze 5.4

>>> print('%0.2f' % bigram_measures.pmi(20, (42, 20), 14307668))
18.38
>>> print('%0.2f' % bigram_measures.pmi(20, (15019, 15629), 14307668))
0.29
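
The first PMI value can be checked by hand: with joint count n_ii = 20,
marginals n_ix = 42 and n_xi = 20, and sample size n_xx = 14307668, PMI is
the log ratio of the observed to the expected co-occurrence count, i.e.
log2(n_ii * n_xx / (n_ix * n_xi)). A quick stand-alone check of the 18.38
above::

    from math import log2

    n_ii, (n_ix, n_xi), n_xx = 20, (42, 20), 14307668
    print(round(log2(n_ii * n_xx / (n_ix * n_xi)), 2))   # 18.38
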

TODO: Find authoritative results for trigrams.

Using contingency table values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

While frequency counts make marginals readily available for collocation
finding, it is common to find published contingency table values. The
collocations package therefore provides a wrapper, ContingencyMeasures, which
wraps an association measures class, providing association measures which
take contingency values as arguments, (n_ii, n_io, n_oi, n_oo) in the
bigram case.

>>> from nltk.metrics import ContingencyMeasures
>>> cont_bigram_measures = ContingencyMeasures(bigram_measures)
>>> print('%0.2f' % cont_bigram_measures.likelihood_ratio(8, 5, 24, 31740))
95.29
>>> print('%0.2f' % cont_bigram_measures.chi_sq(8, 15820, 4667, 14287173))
1.55

Ranking and correlation
~~~~~~~~~~~~~~~~~~~~~~~

It is useful to consider the results of finding collocations as a ranking, and
the rankings output using different association measures can be compared using
the Spearman correlation coefficient.

Ranks can be assigned to a sorted list of results trivially by assigning
strictly increasing ranks to each result:

>>> from nltk.metrics.spearman import *
>>> results_list = ['item1', 'item2', 'item3', 'item4', 'item5']
>>> print(list(ranks_from_sequence(results_list)))
[('item1', 0), ('item2', 1), ('item3', 2), ('item4', 3), ('item5', 4)]

If scores are available for each result, we may allow sufficiently similar
results (differing by no more than rank_gap) to be assigned the same rank:

>>> results_scored = [('item1', 50.0), ('item2', 40.0), ('item3', 38.0),
...                   ('item4', 35.0), ('item5', 14.0)]
>>> print(list(ranks_from_scores(results_scored, rank_gap=5)))
[('item1', 0), ('item2', 1), ('item3', 1), ('item4', 1), ('item5', 4)]

The Spearman correlation coefficient gives a number from -1.0 to 1.0 comparing
two rankings. A coefficient of 1.0 indicates identical rankings; -1.0 indicates
exact opposite rankings.

>>> print('%0.1f' % spearman_correlation(
...     ranks_from_sequence(results_list),
...     ranks_from_sequence(results_list)))
1.0
>>> print('%0.1f' % spearman_correlation(
...     ranks_from_sequence(reversed(results_list)),
...     ranks_from_sequence(results_list)))
-1.0
>>> results_list2 = ['item2', 'item3', 'item1', 'item5', 'item4']
>>> print('%0.1f' % spearman_correlation(
...     ranks_from_sequence(results_list),
...     ranks_from_sequence(results_list2)))
0.6
>>> print('%0.1f' % spearman_correlation(
...     ranks_from_sequence(reversed(results_list)),
...     ranks_from_sequence(results_list2)))
-0.6
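
For tie-free rankings such as these, the coefficient follows the standard
formula rho = 1 - 6*sum(d**2)/(n*(n**2 - 1)), where d is the per-item rank
difference. For results_list vs. results_list2 the differences are
(-2, 1, 1, -1, 1), so sum(d**2) = 8 and rho = 1 - 48/120 = 0.6, matching the
doctest above. A quick stand-alone check::

    r1 = {'item1': 0, 'item2': 1, 'item3': 2, 'item4': 3, 'item5': 4}
    r2 = {'item2': 0, 'item3': 1, 'item1': 2, 'item5': 3, 'item4': 4}
    n = len(r1)
    d2 = sum((r1[k] - r2[k]) ** 2 for k in r1)   # 8
    print(1 - 6 * d2 / (n * (n * n - 1)))        # 0.6
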

Keywords
~~~~~~~~

Bigram association metrics can also be used to perform keyword analysis. For
example, this finds the keywords associated with the "romance" section of the
Brown corpus as measured by likelihood ratio:

>>> romance = nltk.FreqDist(w.lower() for w in nltk.corpus.brown.words(categories='romance') if w.isalpha())
>>> freq = nltk.FreqDist(w.lower() for w in nltk.corpus.brown.words() if w.isalpha())

>>> key = nltk.FreqDist()
>>> for w in romance:
...     key[w] = bigram_measures.likelihood_ratio(romance[w], (freq[w], romance.N()), freq.N())

>>> for k,v in key.most_common(10):
...     print(f'{k:10s} {v:9.3f}')
she         1163.325
i            995.961
her          930.528
you          513.149
of           501.891
is           463.386
had          421.615
he           411.000
the          347.632
said         300.811
lib/python3.10/site-packages/nltk/test/conftest.py
ADDED
@@ -0,0 +1,33 @@
import pytest

from nltk.corpus.reader import CorpusReader


@pytest.fixture(autouse=True)
def mock_plot(mocker):
    """Disable matplotlib plotting in test code"""

    try:
        import matplotlib.pyplot as plt

        mocker.patch.object(plt, "gca")
        mocker.patch.object(plt, "show")
    except ImportError:
        pass


@pytest.fixture(scope="module", autouse=True)
def teardown_loaded_corpora():
    """
    After each test session ends (either doctest or unit test),
    unload any loaded corpora
    """

    yield  # first, wait for the test to end

    import nltk.corpus

    for name in dir(nltk.corpus):
        obj = getattr(nltk.corpus, name, None)
        if isinstance(obj, CorpusReader) and hasattr(obj, "_unload"):
            obj._unload()
lib/python3.10/site-packages/nltk/test/crubadan.doctest
ADDED
@@ -0,0 +1,65 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

Crubadan Corpus Reader
======================

Crubadan is an NLTK corpus reader for ngram files provided
by the Crubadan project. It supports several languages.

>>> from nltk.corpus import crubadan
>>> crubadan.langs()
['abk', 'abn',..., 'zpa', 'zul']

----------------------------------------
Language code mapping and helper methods
----------------------------------------

The web crawler that generates the 3-gram frequencies works at the
level of "writing systems" rather than languages. Writing systems
are assigned internal 2-3 letter codes that require mapping to the
standard ISO 639-3 codes. For more information, please refer to
the README in the nltk_data/crubadan folder after installing it.

To translate ISO 639-3 codes to "Crubadan Code":

>>> crubadan.iso_to_crubadan('eng')
'en'
>>> crubadan.iso_to_crubadan('fra')
'fr'
>>> crubadan.iso_to_crubadan('aaa')

In reverse, print the ISO 639-3 code if we have the Crubadan Code:

>>> crubadan.crubadan_to_iso('en')
'eng'
>>> crubadan.crubadan_to_iso('fr')
'fra'
>>> crubadan.crubadan_to_iso('aa')

---------------------------
Accessing ngram frequencies
---------------------------

On initialization the reader will create a dictionary of every
language supported by the Crubadan project, mapping the ISO 639-3
language code to its corresponding ngram frequency.

You can access individual language FreqDists and the ngrams within them as follows:

>>> english_fd = crubadan.lang_freq('eng')
>>> english_fd['the']
728135

The above accesses the FreqDist of English and returns the frequency of
the ngram 'the'. An ngram that isn't found within the language will return 0:

>>> english_fd['sometest']
0
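
Since ``lang_freq()`` returns an ordinary ``nltk.FreqDist``, all of its usual
methods apply. A small sketch (outputs depend on the installed Crubadan data,
so none are asserted here)::

    english_fd = crubadan.lang_freq('eng')
    print(english_fd.N())                  # total ngram token count
    for ngram, count in english_fd.most_common(5):
        print(ngram, count)                # the five most frequent ngrams
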
A language that isn't supported will raise an exception:

>>> crubadan.lang_freq('elvish')
Traceback (most recent call last):
    ...
RuntimeError: Unsupported language.
lib/python3.10/site-packages/nltk/test/drt.doctest
ADDED
@@ -0,0 +1,515 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

================================
 Discourse Representation Theory
================================

>>> from nltk.sem import logic
>>> from nltk.inference import TableauProver

Overview
========

A DRS can be created with the ``DRS()`` constructor. This takes two
arguments: a list of discourse referents and a list of conditions.

>>> from nltk.sem.drt import *
>>> dexpr = DrtExpression.fromstring
>>> man_x = dexpr('man(x)')
>>> walk_x = dexpr('walk(x)')
>>> x = dexpr('x')
>>> print(DRS([x], [man_x, walk_x]))
([x],[man(x), walk(x)])

The ``parse()`` method can also be applied directly to DRS
expressions, which allows them to be specified more easily.

>>> drs1 = dexpr('([x],[man(x),walk(x)])')
>>> print(drs1)
([x],[man(x), walk(x)])

DRSs can be *merged* using the ``+`` operator.

>>> drs2 = dexpr('([y],[woman(y),stop(y)])')
>>> drs3 = drs1 + drs2
>>> print(drs3)
(([x],[man(x), walk(x)]) + ([y],[woman(y), stop(y)]))
>>> print(drs3.simplify())
([x,y],[man(x), walk(x), woman(y), stop(y)])

We can embed DRSs as components of an ``implies`` condition.

>>> s = '([], [(%s -> %s)])' % (drs1, drs2)
>>> print(dexpr(s))
([],[(([x],[man(x), walk(x)]) -> ([y],[woman(y), stop(y)]))])

The ``fol()`` method converts DRSs into FOL formulae.

>>> print(dexpr(r'([x],[man(x), walks(x)])').fol())
exists x.(man(x) & walks(x))
>>> print(dexpr(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])').fol())
all x.(man(x) -> walks(x))
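
The two outputs above illustrate the standard DRT-to-FOL mapping, which the
remaining examples all follow: referents of a top-level DRS are existentially
quantified over the conjunction of its conditions, while referents of a DRS on
the left of ``->`` are universally quantified over the resulting implication.
Schematically (my own paraphrase of the translation, not NLTK syntax)::

    ([x1,...,xn],[C1,...,Cm])          =>  exists x1...xn.(C1 & ... & Cm)
    (([x1,...,xn],[C1,...,Cm]) -> K)   =>  all x1...xn.((C1 & ... & Cm) -> K')

where K' is the translation of K.
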
|
55 |
+
In order to visualize a DRS, the ``pretty_format()`` method can be used.
|
56 |
+
|
57 |
+
>>> print(drs3.pretty_format())
|
58 |
+
_________ __________
|
59 |
+
| x | | y |
|
60 |
+
(|---------| + |----------|)
|
61 |
+
| man(x) | | woman(y) |
|
62 |
+
| walk(x) | | stop(y) |
|
63 |
+
|_________| |__________|
|
64 |
+
|
65 |
+
|
66 |
+
Parse to semantics
|
67 |
+
------------------
|
68 |
+
|
69 |
+
..
|
70 |
+
>>> logic._counter._value = 0
|
71 |
+
|
72 |
+
DRSs can be used for building compositional semantics in a feature
|
73 |
+
based grammar. To specify that we want to use DRSs, the appropriate
|
74 |
+
logic parser needs be passed as a parameter to ``load_earley()``
|
75 |
+
|
76 |
+
>>> from nltk.parse import load_parser
|
77 |
+
>>> from nltk.sem.drt import DrtParser
|
78 |
+
>>> parser = load_parser('grammars/book_grammars/drt.fcfg', trace=0, logic_parser=DrtParser())
|
79 |
+
>>> for tree in parser.parse('a dog barks'.split()):
|
80 |
+
... print(tree.label()['SEM'].simplify())
|
81 |
+
...
|
82 |
+
([x],[dog(x), bark(x)])
|
83 |
+
|
84 |
+
Alternatively, a ``FeatStructReader`` can be passed with the ``logic_parser`` set on it
|
85 |
+
|
86 |
+
>>> from nltk.featstruct import FeatStructReader
|
87 |
+
>>> from nltk.grammar import FeatStructNonterminal
|
88 |
+
>>> parser = load_parser('grammars/book_grammars/drt.fcfg', trace=0, fstruct_reader=FeatStructReader(fdict_class=FeatStructNonterminal, logic_parser=DrtParser()))
|
89 |
+
>>> for tree in parser.parse('every girl chases a dog'.split()):
|
90 |
+
... print(tree.label()['SEM'].simplify().normalize())
|
91 |
+
...
|
92 |
+
([],[(([z1],[girl(z1)]) -> ([z2],[dog(z2), chase(z1,z2)]))])
|
93 |
+
|
94 |
+
|
95 |
+
|
96 |
+
Unit Tests
|
97 |
+
==========
|
98 |
+
|
99 |
+
Parser
|
100 |
+
------
|
101 |
+
|
102 |
+
>>> print(dexpr(r'([x,y],[sees(x,y)])'))
|
103 |
+
([x,y],[sees(x,y)])
|
104 |
+
>>> print(dexpr(r'([x],[man(x), walks(x)])'))
|
105 |
+
([x],[man(x), walks(x)])
|
106 |
+
>>> print(dexpr(r'\x.([],[man(x), walks(x)])'))
|
107 |
+
\x.([],[man(x), walks(x)])
|
108 |
+
>>> print(dexpr(r'\x.\y.([],[sees(x,y)])'))
|
109 |
+
\x y.([],[sees(x,y)])
|
110 |
+
|
111 |
+
>>> print(dexpr(r'([x,y],[(x = y)])'))
|
112 |
+
([x,y],[(x = y)])
|
113 |
+
>>> print(dexpr(r'([x,y],[(x != y)])'))
|
114 |
+
([x,y],[-(x = y)])
|
115 |
+
|
116 |
+
>>> print(dexpr(r'\x.([],[walks(x)])(john)'))
|
117 |
+
(\x.([],[walks(x)]))(john)
|
118 |
+
>>> print(dexpr(r'\R.\x.([],[big(x,R)])(\y.([],[mouse(y)]))'))
|
119 |
+
(\R x.([],[big(x,R)]))(\y.([],[mouse(y)]))
|
120 |
+
|
121 |
+
>>> print(dexpr(r'(([x],[walks(x)]) + ([y],[runs(y)]))'))
|
122 |
+
(([x],[walks(x)]) + ([y],[runs(y)]))
|
123 |
+
>>> print(dexpr(r'(([x,y],[walks(x), jumps(y)]) + (([z],[twos(z)]) + ([w],[runs(w)])))'))
|
124 |
+
(([x,y],[walks(x), jumps(y)]) + ([z],[twos(z)]) + ([w],[runs(w)]))
|
125 |
+
>>> print(dexpr(r'((([],[walks(x)]) + ([],[twos(x)])) + ([],[runs(x)]))'))
|
126 |
+
(([],[walks(x)]) + ([],[twos(x)]) + ([],[runs(x)]))
|
127 |
+
>>> print(dexpr(r'((([],[walks(x)]) + ([],[runs(x)])) + (([],[threes(x)]) + ([],[fours(x)])))'))
|
128 |
+
(([],[walks(x)]) + ([],[runs(x)]) + ([],[threes(x)]) + ([],[fours(x)]))
|
129 |
+
|
130 |
+
>>> print(dexpr(r'(([],[walks(x)]) -> ([],[runs(x)]))'))
|
131 |
+
(([],[walks(x)]) -> ([],[runs(x)]))
|
132 |
+
|
133 |
+
>>> print(dexpr(r'([x],[PRO(x), sees(John,x)])'))
|
134 |
+
([x],[PRO(x), sees(John,x)])
|
135 |
+
>>> print(dexpr(r'([x],[man(x), -([],[walks(x)])])'))
|
136 |
+
([x],[man(x), -([],[walks(x)])])
|
137 |
+
>>> print(dexpr(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])'))
|
138 |
+
([],[(([x],[man(x)]) -> ([],[walks(x)]))])
|
139 |
+
|
140 |
+
>>> print(dexpr(r'DRS([x],[walk(x)])'))
|
141 |
+
([x],[walk(x)])
|
142 |
+
>>> print(dexpr(r'DRS([x][walk(x)])'))
|
143 |
+
([x],[walk(x)])
|
144 |
+
>>> print(dexpr(r'([x][walk(x)])'))
|
145 |
+
([x],[walk(x)])
|
146 |
+
|
147 |
+
``simplify()``
|
148 |
+
--------------
|
149 |
+
|
150 |
+
>>> print(dexpr(r'\x.([],[man(x), walks(x)])(john)').simplify())
|
151 |
+
([],[man(john), walks(john)])
|
152 |
+
>>> print(dexpr(r'\x.\y.([z],[dog(z),sees(x,y)])(john)(mary)').simplify())
|
153 |
+
([z],[dog(z), sees(john,mary)])
|
154 |
+
>>> print(dexpr(r'\R x.([],[big(x,R)])(\y.([],[mouse(y)]))').simplify())
|
155 |
+
\x.([],[big(x,\y.([],[mouse(y)]))])
|
156 |
+
|
157 |
+
>>> print(dexpr(r'(([x],[walks(x)]) + ([y],[runs(y)]))').simplify())
|
158 |
+
([x,y],[walks(x), runs(y)])
|
159 |
+
>>> print(dexpr(r'(([x,y],[walks(x), jumps(y)]) + (([z],[twos(z)]) + ([w],[runs(w)])))').simplify())
|
160 |
+
([w,x,y,z],[walks(x), jumps(y), twos(z), runs(w)])
|
161 |
+
>>> print(dexpr(r'((([],[walks(x)]) + ([],[runs(x)]) + ([],[threes(x)]) + ([],[fours(x)])))').simplify())
|
162 |
+
([],[walks(x), runs(x), threes(x), fours(x)])
|
163 |
+
>>> dexpr(r'([x],[man(x)])+([x],[walks(x)])').simplify() == \
|
164 |
+
... dexpr(r'([x,z1],[man(x), walks(z1)])')
|
165 |
+
True
|
166 |
+
>>> dexpr(r'([y],[boy(y), (([x],[dog(x)]) -> ([],[chase(x,y)]))])+([x],[run(x)])').simplify() == \
|
167 |
+
... dexpr(r'([y,z1],[boy(y), (([x],[dog(x)]) -> ([],[chase(x,y)])), run(z1)])')
|
168 |
+
True
|
169 |
+
|
170 |
+
>>> dexpr(r'\Q.(([x],[john(x),walks(x)]) + Q)(([x],[PRO(x),leaves(x)]))').simplify() == \
|
171 |
+
... dexpr(r'([x,z1],[john(x), walks(x), PRO(z1), leaves(z1)])')
|
172 |
+
True
|
173 |
+
|
174 |
+
>>> logic._counter._value = 0
|
175 |
+
>>> print(dexpr('([],[(([x],[dog(x)]) -> ([e,y],[boy(y), chase(e), subj(e,x), obj(e,y)]))])+([e,x],[PRO(x), run(e), subj(e,x)])').simplify().normalize().normalize())
|
176 |
+
([e02,z5],[(([z3],[dog(z3)]) -> ([e01,z4],[boy(z4), chase(e01), subj(e01,z3), obj(e01,z4)])), PRO(z5), run(e02), subj(e02,z5)])
|
177 |
+
|
178 |
+
``fol()``
|
179 |
+
-----------
|
180 |
+
|
181 |
+
>>> print(dexpr(r'([x,y],[sees(x,y)])').fol())
|
182 |
+
exists x y.sees(x,y)
|
183 |
+
>>> print(dexpr(r'([x],[man(x), walks(x)])').fol())
|
184 |
+
exists x.(man(x) & walks(x))
|
185 |
+
>>> print(dexpr(r'\x.([],[man(x), walks(x)])').fol())
|
186 |
+
\x.(man(x) & walks(x))
|
187 |
+
>>> print(dexpr(r'\x y.([],[sees(x,y)])').fol())
|
188 |
+
\x y.sees(x,y)
|
189 |
+
|
190 |
+
>>> print(dexpr(r'\x.([],[walks(x)])(john)').fol())
|
191 |
+
\x.walks(x)(john)
|
192 |
+
>>> print(dexpr(r'\R x.([],[big(x,R)])(\y.([],[mouse(y)]))').fol())
|
193 |
+
(\R x.big(x,R))(\y.mouse(y))
|
194 |
+
|
195 |
+
>>> print(dexpr(r'(([x],[walks(x)]) + ([y],[runs(y)]))').fol())
|
196 |
+
(exists x.walks(x) & exists y.runs(y))
|
197 |
+
|
198 |
+
>>> print(dexpr(r'(([],[walks(x)]) -> ([],[runs(x)]))').fol())
|
199 |
+
(walks(x) -> runs(x))
|
200 |
+
|
201 |
+
>>> print(dexpr(r'([x],[PRO(x), sees(John,x)])').fol())
|
202 |
+
exists x.(PRO(x) & sees(John,x))
|
203 |
+
>>> print(dexpr(r'([x],[man(x), -([],[walks(x)])])').fol())
|
204 |
+
exists x.(man(x) & -walks(x))
|
205 |
+
>>> print(dexpr(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])').fol())
|
206 |
+
all x.(man(x) -> walks(x))
|
207 |
+
|
208 |
+
>>> print(dexpr(r'([x],[man(x) | walks(x)])').fol())
|
209 |
+
exists x.(man(x) | walks(x))
|
210 |
+
>>> print(dexpr(r'P(x) + ([x],[walks(x)])').fol())
|
211 |
+
(P(x) & exists x.walks(x))
|
212 |
+
|
213 |
+
``resolve_anaphora()``
|
214 |
+
----------------------
|
215 |
+
|
216 |
+
>>> from nltk.sem.drt import AnaphoraResolutionException
|
217 |
+
|
218 |
+
>>> print(resolve_anaphora(dexpr(r'([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])')))
|
219 |
+
([x,y,z],[dog(x), cat(y), walks(z), (z = [x,y])])
|
220 |
+
>>> print(resolve_anaphora(dexpr(r'([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])')))
|
221 |
+
([],[(([x],[dog(x)]) -> ([y],[walks(y), (y = x)]))])
|
222 |
+
>>> print(resolve_anaphora(dexpr(r'(([x,y],[]) + ([],[PRO(x)]))')).simplify())
|
223 |
+
([x,y],[(x = y)])
|
224 |
+
>>> try: print(resolve_anaphora(dexpr(r'([x],[walks(x), PRO(x)])')))
|
225 |
+
... except AnaphoraResolutionException as e: print(e)
|
226 |
+
Variable 'x' does not resolve to anything.
|
227 |
+
>>> print(resolve_anaphora(dexpr('([e01,z6,z7],[boy(z6), PRO(z7), run(e01), subj(e01,z7)])')))
|
228 |
+
([e01,z6,z7],[boy(z6), (z7 = z6), run(e01), subj(e01,z7)])
|
229 |
+
|
230 |
+
``equiv()``:
|
231 |
+
----------------
|
232 |
+
|
233 |
+
>>> a = dexpr(r'([x],[man(x), walks(x)])')
|
234 |
+
>>> b = dexpr(r'([x],[walks(x), man(x)])')
|
235 |
+
>>> print(a.equiv(b, TableauProver()))
|
236 |
+
True
|
237 |
+
|
238 |
+
|
239 |
+
``replace()``:
|
240 |
+
--------------
|
241 |
+
|
242 |
+
>>> a = dexpr(r'a')
|
243 |
+
>>> w = dexpr(r'w')
|
244 |
+
>>> x = dexpr(r'x')
|
245 |
+
>>> y = dexpr(r'y')
|
246 |
+
>>> z = dexpr(r'z')
|
247 |
+
|
248 |
+
|
249 |
+
replace bound
|
250 |
+
-------------
|
251 |
+
|
252 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(x.variable, a, False))
|
253 |
+
([x],[give(x,y,z)])
|
254 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(x.variable, a, True))
|
255 |
+
([a],[give(a,y,z)])
|
256 |
+
|
257 |
+
replace unbound
|
258 |
+
---------------
|
259 |
+
|
260 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, a, False))
|
261 |
+
([x],[give(x,a,z)])
|
262 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, a, True))
|
263 |
+
([x],[give(x,a,z)])
|
264 |
+
|
265 |
+
replace unbound with bound
|
266 |
+
--------------------------
|
267 |
+
|
268 |
+
>>> dexpr(r'([x],[give(x,y,z)])').replace(y.variable, x, False) == \
|
269 |
+
... dexpr('([z1],[give(z1,x,z)])')
|
270 |
+
True
|
271 |
+
>>> dexpr(r'([x],[give(x,y,z)])').replace(y.variable, x, True) == \
|
272 |
+
... dexpr('([z1],[give(z1,x,z)])')
|
273 |
+
True
|
274 |
+
|
275 |
+
replace unbound with unbound
|
276 |
+
----------------------------
|
277 |
+
|
278 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, z, False))
|
279 |
+
([x],[give(x,z,z)])
|
280 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, z, True))
|
281 |
+
([x],[give(x,z,z)])
|
282 |
+
|
283 |
+
|
284 |
+
replace unbound
|
285 |
+
---------------
|
286 |
+
|
287 |
+
>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, False))
|
288 |
+
(([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))
|
289 |
+
>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, True))
|
290 |
+
(([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))
|
291 |
+
|
292 |
+
replace bound
|
293 |
+
-------------
|
294 |
+
|
295 |
+
>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(x.variable, a, False))
|
296 |
+
(([x],[P(x,y,z)]) + ([y],[Q(x,y,z)]))
|
297 |
+
>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(x.variable, a, True))
|
298 |
+
(([a],[P(a,y,z)]) + ([y],[Q(a,y,z)]))
|
299 |
+
|
300 |
+
replace unbound with unbound
|
301 |
+
----------------------------
|
302 |
+
|
303 |
+
>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, False))
|
304 |
+
(([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))
|
305 |
+
>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, True))
|
306 |
+
(([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))
|
307 |
+
|
308 |
+
replace unbound with bound on same side
|
309 |
+
---------------------------------------
|
310 |
+
|
311 |
+
>>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(z.variable, x, False) == \
|
312 |
+
... dexpr(r'(([z1],[P(z1,y,x)]) + ([y],[Q(z1,y,w)]))')
|
313 |
+
True
|
314 |
+
>>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(z.variable, x, True) == \
|
315 |
+
... dexpr(r'(([z1],[P(z1,y,x)]) + ([y],[Q(z1,y,w)]))')
|
316 |
+
True
|
317 |
+
|
318 |
+
replace unbound with bound on other side
|
319 |
+
----------------------------------------
|
320 |
+
|
321 |
+
>>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(w.variable, x, False) == \
|
322 |
+
... dexpr(r'(([z1],[P(z1,y,z)]) + ([y],[Q(z1,y,x)]))')
|
323 |
+
True
|
324 |
+
>>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(w.variable, x, True) == \
|
325 |
+
... dexpr(r'(([z1],[P(z1,y,z)]) + ([y],[Q(z1,y,x)]))')
|
326 |
+
True
|
327 |
+
|
328 |
+
replace unbound with double bound
|
329 |
+
---------------------------------
|
330 |
+
|
331 |
+
>>> dexpr(r'([x],[P(x,y,z)])+([x],[Q(x,y,w)])').replace(z.variable, x, False) == \
|
332 |
+
... dexpr(r'(([z1],[P(z1,y,x)]) + ([z1],[Q(z1,y,w)]))')
|
333 |
+
True
|
334 |
+
>>> dexpr(r'([x],[P(x,y,z)])+([x],[Q(x,y,w)])').replace(z.variable, x, True) == \
|
335 |
+
... dexpr(r'(([z1],[P(z1,y,x)]) + ([z1],[Q(z1,y,w)]))')
|
336 |
+
True
|
337 |
+
|
338 |
+
|
339 |
+
regression tests
|
340 |
+
----------------
|
341 |
+
|
342 |
+
>>> d = dexpr('([x],[A(c), ([y],[B(x,y,z,a)])->([z],[C(x,y,z,a)])])')
|
343 |
+
>>> print(d)
|
344 |
+
([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
|
345 |
+
>>> print(d.pretty_format())
|
346 |
+
____________________________________
|
347 |
+
| x |
|
348 |
+
|------------------------------------|
|
349 |
+
| A(c) |
|
350 |
+
| ____________ ____________ |
|
351 |
+
| | y | | z | |
|
352 |
+
| (|------------| -> |------------|) |
|
353 |
+
| | B(x,y,z,a) | | C(x,y,z,a) | |
|
354 |
+
    | |____________| |____________| |
    |____________________________________|

    >>> print(str(d))
    ([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
    >>> print(d.fol())
    exists x.(A(c) & all y.(B(x,y,z,a) -> exists z.C(x,y,z,a)))
    >>> print(d.replace(Variable('a'), DrtVariableExpression(Variable('r'))))
    ([x],[A(c), (([y],[B(x,y,z,r)]) -> ([z],[C(x,y,z,r)]))])
    >>> print(d.replace(Variable('x'), DrtVariableExpression(Variable('r'))))
    ([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
    >>> print(d.replace(Variable('y'), DrtVariableExpression(Variable('r'))))
    ([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
    >>> print(d.replace(Variable('z'), DrtVariableExpression(Variable('r'))))
    ([x],[A(c), (([y],[B(x,y,r,a)]) -> ([z],[C(x,y,z,a)]))])
    >>> print(d.replace(Variable('x'), DrtVariableExpression(Variable('r')), True))
    ([r],[A(c), (([y],[B(r,y,z,a)]) -> ([z],[C(r,y,z,a)]))])
    >>> print(d.replace(Variable('y'), DrtVariableExpression(Variable('r')), True))
    ([x],[A(c), (([r],[B(x,r,z,a)]) -> ([z],[C(x,r,z,a)]))])
    >>> print(d.replace(Variable('z'), DrtVariableExpression(Variable('r')), True))
    ([x],[A(c), (([y],[B(x,y,r,a)]) -> ([r],[C(x,y,r,a)]))])
    >>> print(d == dexpr('([l],[A(c), ([m],[B(l,m,z,a)])->([n],[C(l,m,n,a)])])'))
    True
    >>> d = dexpr('([],[([x,y],[B(x,y,h), ([a,b],[dee(x,a,g)])])->([z,w],[cee(x,y,f), ([c,d],[E(x,c,d,e)])])])')
    >>> sorted(d.free())
    [Variable('B'), Variable('E'), Variable('e'), Variable('f'), Variable('g'), Variable('h')]
    >>> sorted(d.variables())
    [Variable('B'), Variable('E'), Variable('e'), Variable('f'), Variable('g'), Variable('h')]
    >>> sorted(d.get_refs(True))
    [Variable('a'), Variable('b'), Variable('c'), Variable('d'), Variable('w'), Variable('x'), Variable('y'), Variable('z')]
    >>> sorted(d.conds[0].get_refs(False))
    [Variable('x'), Variable('y')]
    >>> print(dexpr('([x,y],[A(x,y), (x=y), ([],[B(x,y)])->([],[C(x,y)]), ([x,y],[D(x,y)])->([],[E(x,y)]), ([],[F(x,y)])->([x,y],[G(x,y)])])').eliminate_equality())
    ([x],[A(x,x), (([],[B(x,x)]) -> ([],[C(x,x)])), (([x,y],[D(x,y)]) -> ([],[E(x,y)])), (([],[F(x,x)]) -> ([x,y],[G(x,y)]))])
    >>> print(dexpr('([x,y],[A(x,y), (x=y)]) -> ([],[B(x,y)])').eliminate_equality())
    (([x],[A(x,x)]) -> ([],[B(x,x)]))
    >>> print(dexpr('([x,y],[A(x,y)]) -> ([],[B(x,y), (x=y)])').eliminate_equality())
    (([x,y],[A(x,y)]) -> ([],[B(x,x)]))
    >>> print(dexpr('([x,y],[A(x,y), (x=y), ([],[B(x,y)])])').eliminate_equality())
    ([x],[A(x,x), ([],[B(x,x)])])
    >>> print(dexpr('([x,y],[A(x,y), ([],[B(x,y), (x=y)])])').eliminate_equality())
    ([x,y],[A(x,y), ([],[B(x,x)])])
    >>> print(dexpr('([z8 z9 z10],[A(z8), z8=z10, z9=z10, B(z9), C(z10), D(z10)])').eliminate_equality())
    ([z9],[A(z9), B(z9), C(z9), D(z9)])

    >>> print(dexpr('([x,y],[A(x,y), (x=y), ([],[B(x,y)]), ([x,y],[C(x,y)])])').eliminate_equality())
    ([x],[A(x,x), ([],[B(x,x)]), ([x,y],[C(x,y)])])
    >>> print(dexpr('([x,y],[A(x,y)]) + ([],[B(x,y), (x=y)]) + ([],[C(x,y)])').eliminate_equality())
    ([x],[A(x,x), B(x,x), C(x,x)])
    >>> print(dexpr('([x,y],[B(x,y)])+([x,y],[C(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))))
    (([x,y],[B(x,y)]) + ([x,y],[C(x,y)]))
    >>> print(dexpr('(([x,y],[B(x,y)])+([],[C(x,y)]))+([],[D(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))))
    (([x,y],[B(x,y)]) + ([],[C(x,y)]) + ([],[D(x,y)]))
    >>> print(dexpr('(([],[B(x,y)])+([],[C(x,y)]))+([],[D(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))))
    (([],[B(x,x)]) + ([],[C(x,x)]) + ([],[D(x,x)]))
    >>> print(dexpr('(([],[B(x,y), ([x,y],[A(x,y)])])+([],[C(x,y)]))+([],[D(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))).normalize())
    (([],[B(z3,z1), ([z2,z3],[A(z3,z2)])]) + ([],[C(z3,z1)]) + ([],[D(z3,z1)]))

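The third argument to ``replace()`` seen above controls whether bound occurrences
are renamed along with free ones. A minimal standalone sketch of the same calls
outside a doctest session (the helper name ``rename`` is ours, not part of NLTK)::

    from nltk.sem.drt import DrtExpression, DrtVariableExpression
    from nltk.sem.logic import Variable

    dexpr = DrtExpression.fromstring

    def rename(drs_string, old, new, bound=False):
        # bound=True also renames bound occurrences, as in the examples above.
        drs = dexpr(drs_string)
        return drs.replace(Variable(old), DrtVariableExpression(Variable(new)), bound)

    print(rename('([x],[walk(x), dog(y)])', 'y', 'z'))      # only free occurrences
    print(rename('([x],[walk(x)])', 'x', 'z', bound=True))  # bound occurrences too
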
Parse errors
============

    >>> def parse_error(drtstring):
    ...     try: dexpr(drtstring)
    ...     except logic.LogicalExpressionException as e: print(e)

    >>> parse_error(r'')
    End of input found. Expression expected.
    <BLANKLINE>
    ^
    >>> parse_error(r'(')
    End of input found. Expression expected.
    (
     ^
    >>> parse_error(r'()')
    Unexpected token: ')'. Expression expected.
    ()
     ^
    >>> parse_error(r'([')
    End of input found. Expected token ']'.
    ([
      ^
    >>> parse_error(r'([,')
    ',' is an illegal variable name. Constants may not be quantified.
    ([,
      ^
    >>> parse_error(r'([x,')
    End of input found. Variable expected.
    ([x,
        ^
    >>> parse_error(r'([]')
    End of input found. Expected token '['.
    ([]
       ^
    >>> parse_error(r'([][')
    End of input found. Expected token ']'.
    ([][
        ^
    >>> parse_error(r'([][,')
    Unexpected token: ','. Expression expected.
    ([][,
        ^
    >>> parse_error(r'([][]')
    End of input found. Expected token ')'.
    ([][]
         ^
    >>> parse_error(r'([x][man(x)]) |')
    End of input found. Expression expected.
    ([x][man(x)]) |
                   ^

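When DRS strings come from elsewhere (user input, a corpus), the same exception can
be caught to fail gracefully instead of crashing; a small sketch, using only the
classes exercised above (the helper name ``try_dexpr`` is ours)::

    from nltk.sem import logic
    from nltk.sem.drt import DrtExpression

    def try_dexpr(s):
        # Return the parsed DRS, or None after printing the parser's diagnostic.
        try:
            return DrtExpression.fromstring(s)
        except logic.LogicalExpressionException as err:
            print(err)
            return None
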
Pretty Printing
===============

    >>> dexpr(r"([],[])").pretty_print()
     __
    |  |
    |--|
    |__|

    >>> dexpr(r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])").pretty_print()
     _____________________________
    |                             |
    |-----------------------------|
    |   ________      _________   |
    |  | x      |    |         |  |
    | (|--------| -> |---------|) |
    |  | big(x) |    | bark(x) |  |
    |  | dog(x) |    |_________|  |
    |  |________|                 |
    |       _________             |
    |      | x       |            |
    |   __ |---------|            |
    |  |   | walk(x) |            |
    |      |_________|            |
    |_____________________________|

    >>> dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pretty_print()
      _________     _________
     | x y     |   | z       |
    (|---------| + |---------|)
     | (x = y) |   | dog(z)  |
     |_________|   | walk(z) |
                   |_________|

    >>> dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pretty_print()
     _______________________________
    |                               |
    |-------------------------------|
    |   ___     ___     _________   |
    |  | x |   | y |   | z       |  |
    | (|---| | |---| | |---------|) |
    |  |___|   |___|   | dog(z)  |  |
    |                  | walk(z) |  |
    |                  |_________|  |
    |_______________________________|

    >>> dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pretty_print()
                 ___                        ________
         \      | x |                  \   |        |
        /\ P Q.(|---| + P(x) + Q(x))( /\ x.|--------|)
                |___|                      | dog(x) |
                                           |________|
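If the drawing itself is needed as a string (for logging, say) rather than written
to stdout, ``pretty_print()`` is, to our understanding, backed by a
``pretty_format()`` method; treat the method name as an assumption to check against
your NLTK version::

    from nltk.sem.drt import DrtExpression

    dexpr = DrtExpression.fromstring
    drs = dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])")
    art = drs.pretty_format()   # assumed counterpart of pretty_print()
    print(art)
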
lib/python3.10/site-packages/nltk/test/gensim_fixt.py
ADDED
@@ -0,0 +1,4 @@
def setup_module():
    import pytest

    pytest.importorskip("gensim")
lib/python3.10/site-packages/nltk/test/gluesemantics.doctest
ADDED
@@ -0,0 +1,383 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==============================================================================
Glue Semantics
==============================================================================



======================
Linear logic
======================

    >>> from nltk.sem import logic
    >>> from nltk.sem.glue import *
    >>> from nltk.sem.linearlogic import *

    >>> from nltk.sem.linearlogic import Expression
    >>> read_expr = Expression.fromstring

Parser

    >>> print(read_expr(r'f'))
    f
    >>> print(read_expr(r'(g -o f)'))
    (g -o f)
    >>> print(read_expr(r'(g -o (h -o f))'))
    (g -o (h -o f))
    >>> print(read_expr(r'((g -o G) -o G)'))
    ((g -o G) -o G)
    >>> print(read_expr(r'(g -o f)(g)'))
    (g -o f)(g)
    >>> print(read_expr(r'((g -o G) -o G)((g -o f))'))
    ((g -o G) -o G)((g -o f))

Simplify

    >>> print(read_expr(r'f').simplify())
    f
    >>> print(read_expr(r'(g -o f)').simplify())
    (g -o f)
    >>> print(read_expr(r'((g -o G) -o G)').simplify())
    ((g -o G) -o G)
    >>> print(read_expr(r'(g -o f)(g)').simplify())
    f
    >>> try: read_expr(r'(g -o f)(f)').simplify()
    ... except LinearLogicApplicationException as e: print(e)
    ...
    Cannot apply (g -o f) to f. Cannot unify g with f given {}
    >>> print(read_expr(r'(G -o f)(g)').simplify())
    f
    >>> print(read_expr(r'((g -o G) -o G)((g -o f))').simplify())
    f

Test BindingDict

    >>> h = ConstantExpression('h')
    >>> g = ConstantExpression('g')
    >>> f = ConstantExpression('f')

    >>> H = VariableExpression('H')
    >>> G = VariableExpression('G')
    >>> F = VariableExpression('F')

    >>> d1 = BindingDict({H: h})
    >>> d2 = BindingDict({F: f, G: F})
    >>> d12 = d1 + d2
    >>> all12 = ['%s: %s' % (v, d12[v]) for v in d12.d]
    >>> all12.sort()
    >>> print(all12)
    ['F: f', 'G: f', 'H: h']

    >>> BindingDict([(F,f),(G,g),(H,h)]) == BindingDict({F:f, G:g, H:h})
    True

    >>> d4 = BindingDict({F: f})
    >>> try: d4[F] = g
    ... except VariableBindingException as e: print(e)
    Variable F already bound to another value

Test Unify

    >>> try: f.unify(g, BindingDict())
    ... except UnificationException as e: print(e)
    ...
    Cannot unify f with g given {}

    >>> f.unify(G, BindingDict()) == BindingDict({G: f})
    True
    >>> try: f.unify(G, BindingDict({G: h}))
    ... except UnificationException as e: print(e)
    ...
    Cannot unify f with G given {G: h}
    >>> f.unify(G, BindingDict({G: f})) == BindingDict({G: f})
    True
    >>> f.unify(G, BindingDict({H: f})) == BindingDict({G: f, H: f})
    True

    >>> G.unify(f, BindingDict()) == BindingDict({G: f})
    True
    >>> try: G.unify(f, BindingDict({G: h}))
    ... except UnificationException as e: print(e)
    ...
    Cannot unify G with f given {G: h}
    >>> G.unify(f, BindingDict({G: f})) == BindingDict({G: f})
    True
    >>> G.unify(f, BindingDict({H: f})) == BindingDict({G: f, H: f})
    True

    >>> G.unify(F, BindingDict()) == BindingDict({G: F})
    True
    >>> try: G.unify(F, BindingDict({G: H}))
    ... except UnificationException as e: print(e)
    ...
    Cannot unify G with F given {G: H}
    >>> G.unify(F, BindingDict({G: F})) == BindingDict({G: F})
    True
    >>> G.unify(F, BindingDict({H: F})) == BindingDict({G: F, H: F})
    True

Test Compile

    >>> print(read_expr('g').compile_pos(Counter(), GlueFormula))
    (<ConstantExpression g>, [])
    >>> print(read_expr('(g -o f)').compile_pos(Counter(), GlueFormula))
    (<ImpExpression (g -o f)>, [])
    >>> print(read_expr('(g -o (h -o f))').compile_pos(Counter(), GlueFormula))
    (<ImpExpression (g -o (h -o f))>, [])


======================
Glue
======================

Demo of "John walks"
--------------------

    >>> john = GlueFormula("John", "g")
    >>> print(john)
    John : g
    >>> walks = GlueFormula(r"\x.walks(x)", "(g -o f)")
    >>> print(walks)
    \x.walks(x) : (g -o f)
    >>> print(walks.applyto(john))
    \x.walks(x)(John) : (g -o f)(g)
    >>> print(walks.applyto(john).simplify())
    walks(John) : f


Demo of "A dog walks"
---------------------

    >>> a = GlueFormula("\\P Q.some x.(P(x) and Q(x))", "((gv -o gr) -o ((g -o G) -o G))")
    >>> print(a)
    \P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G) -o G))
    >>> man = GlueFormula(r"\x.man(x)", "(gv -o gr)")
    >>> print(man)
    \x.man(x) : (gv -o gr)
    >>> walks = GlueFormula(r"\x.walks(x)", "(g -o f)")
    >>> print(walks)
    \x.walks(x) : (g -o f)
    >>> a_man = a.applyto(man)
    >>> print(a_man.simplify())
    \Q.exists x.(man(x) & Q(x)) : ((g -o G) -o G)
    >>> a_man_walks = a_man.applyto(walks)
    >>> print(a_man_walks.simplify())
    exists x.(man(x) & walks(x)) : f


Demo of 'every girl chases a dog'
---------------------------------

Individual words:

    >>> every = GlueFormula("\\P Q.all x.(P(x) -> Q(x))", "((gv -o gr) -o ((g -o G) -o G))")
    >>> print(every)
    \P Q.all x.(P(x) -> Q(x)) : ((gv -o gr) -o ((g -o G) -o G))
    >>> girl = GlueFormula(r"\x.girl(x)", "(gv -o gr)")
    >>> print(girl)
    \x.girl(x) : (gv -o gr)
    >>> chases = GlueFormula(r"\x y.chases(x,y)", "(g -o (h -o f))")
    >>> print(chases)
    \x y.chases(x,y) : (g -o (h -o f))
    >>> a = GlueFormula("\\P Q.some x.(P(x) and Q(x))", "((hv -o hr) -o ((h -o H) -o H))")
    >>> print(a)
    \P Q.exists x.(P(x) & Q(x)) : ((hv -o hr) -o ((h -o H) -o H))
    >>> dog = GlueFormula(r"\x.dog(x)", "(hv -o hr)")
    >>> print(dog)
    \x.dog(x) : (hv -o hr)

Noun Quantification can only be done one way:

    >>> every_girl = every.applyto(girl)
    >>> print(every_girl.simplify())
    \Q.all x.(girl(x) -> Q(x)) : ((g -o G) -o G)
    >>> a_dog = a.applyto(dog)
    >>> print(a_dog.simplify())
    \Q.exists x.(dog(x) & Q(x)) : ((h -o H) -o H)

The first reading is achieved by combining 'chases' with 'a dog' first.
Since 'a dog' requires something of the form '(h -o H)' we must
get rid of the 'g' in the glue of 'chases'. We will do this with
the '-o elimination' rule. So, x1 will be our subject placeholder.

    >>> xPrime = GlueFormula("x1", "g")
    >>> print(xPrime)
    x1 : g
    >>> xPrime_chases = chases.applyto(xPrime)
    >>> print(xPrime_chases.simplify())
    \y.chases(x1,y) : (h -o f)
    >>> xPrime_chases_a_dog = a_dog.applyto(xPrime_chases)
    >>> print(xPrime_chases_a_dog.simplify())
    exists x.(dog(x) & chases(x1,x)) : f

Now we can retract our subject placeholder using lambda-abstraction and
combine with the true subject.

    >>> chases_a_dog = xPrime_chases_a_dog.lambda_abstract(xPrime)
    >>> print(chases_a_dog.simplify())
    \x1.exists x.(dog(x) & chases(x1,x)) : (g -o f)
    >>> every_girl_chases_a_dog = every_girl.applyto(chases_a_dog)
    >>> r1 = every_girl_chases_a_dog.simplify()
    >>> r2 = GlueFormula(r'all x.(girl(x) -> exists z1.(dog(z1) & chases(x,z1)))', 'f')
    >>> r1 == r2
    True

The second reading is achieved by combining 'every girl' with 'chases' first.

    >>> xPrime = GlueFormula("x1", "g")
    >>> print(xPrime)
    x1 : g
    >>> xPrime_chases = chases.applyto(xPrime)
    >>> print(xPrime_chases.simplify())
    \y.chases(x1,y) : (h -o f)
    >>> yPrime = GlueFormula("x2", "h")
    >>> print(yPrime)
    x2 : h
    >>> xPrime_chases_yPrime = xPrime_chases.applyto(yPrime)
    >>> print(xPrime_chases_yPrime.simplify())
    chases(x1,x2) : f
    >>> chases_yPrime = xPrime_chases_yPrime.lambda_abstract(xPrime)
    >>> print(chases_yPrime.simplify())
    \x1.chases(x1,x2) : (g -o f)
    >>> every_girl_chases_yPrime = every_girl.applyto(chases_yPrime)
    >>> print(every_girl_chases_yPrime.simplify())
    all x.(girl(x) -> chases(x,x2)) : f
    >>> every_girl_chases = every_girl_chases_yPrime.lambda_abstract(yPrime)
    >>> print(every_girl_chases.simplify())
    \x2.all x.(girl(x) -> chases(x,x2)) : (h -o f)
    >>> every_girl_chases_a_dog = a_dog.applyto(every_girl_chases)
    >>> r1 = every_girl_chases_a_dog.simplify()
    >>> r2 = GlueFormula(r'exists x.(dog(x) & all z2.(girl(z2) -> chases(z2,x)))', 'f')
    >>> r1 == r2
    True


Compilation
-----------

    >>> for cp in GlueFormula('m', '(b -o a)').compile(Counter()): print(cp)
    m : (b -o a) : {1}
    >>> for cp in GlueFormula('m', '((c -o b) -o a)').compile(Counter()): print(cp)
    v1 : c : {1}
    m : (b[1] -o a) : {2}
    >>> for cp in GlueFormula('m', '((d -o (c -o b)) -o a)').compile(Counter()): print(cp)
    v1 : c : {1}
    v2 : d : {2}
    m : (b[1, 2] -o a) : {3}
    >>> for cp in GlueFormula('m', '((d -o e) -o ((c -o b) -o a))').compile(Counter()): print(cp)
    v1 : d : {1}
    v2 : c : {2}
    m : (e[1] -o (b[2] -o a)) : {3}
    >>> for cp in GlueFormula('m', '(((d -o c) -o b) -o a)').compile(Counter()): print(cp)
    v1 : (d -o c) : {1}
    m : (b[1] -o a) : {2}
    >>> for cp in GlueFormula('m', '((((e -o d) -o c) -o b) -o a)').compile(Counter()): print(cp)
    v1 : e : {1}
    v2 : (d[1] -o c) : {2}
    m : (b[2] -o a) : {3}


Demo of 'a man walks' using Compilation
---------------------------------------

Premises

    >>> a = GlueFormula('\\P Q.some x.(P(x) and Q(x))', '((gv -o gr) -o ((g -o G) -o G))')
    >>> print(a)
    \P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G) -o G))

    >>> man = GlueFormula('\\x.man(x)', '(gv -o gr)')
    >>> print(man)
    \x.man(x) : (gv -o gr)

    >>> walks = GlueFormula('\\x.walks(x)', '(g -o f)')
    >>> print(walks)
    \x.walks(x) : (g -o f)

Compiled Premises:

    >>> counter = Counter()
    >>> ahc = a.compile(counter)
    >>> g1 = ahc[0]
    >>> print(g1)
    v1 : gv : {1}
    >>> g2 = ahc[1]
    >>> print(g2)
    v2 : g : {2}
    >>> g3 = ahc[2]
    >>> print(g3)
    \P Q.exists x.(P(x) & Q(x)) : (gr[1] -o (G[2] -o G)) : {3}
    >>> g4 = man.compile(counter)[0]
    >>> print(g4)
    \x.man(x) : (gv -o gr) : {4}
    >>> g5 = walks.compile(counter)[0]
    >>> print(g5)
    \x.walks(x) : (g -o f) : {5}

Derivation:

    >>> g14 = g4.applyto(g1)
    >>> print(g14.simplify())
    man(v1) : gr : {1, 4}
    >>> g134 = g3.applyto(g14)
    >>> print(g134.simplify())
    \Q.exists x.(man(x) & Q(x)) : (G[2] -o G) : {1, 3, 4}
    >>> g25 = g5.applyto(g2)
    >>> print(g25.simplify())
    walks(v2) : f : {2, 5}
    >>> g12345 = g134.applyto(g25)
    >>> print(g12345.simplify())
    exists x.(man(x) & walks(x)) : f : {1, 2, 3, 4, 5}

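The same bookkeeping can be done in one pass rather than premise by premise; a
minimal sketch that pools the compiled premises before the derivation, using only
the classes shown above (the helper name ``compile_premises`` is ours, and we
assume ``Counter`` lives in ``nltk.sem.logic``, as the ``_counter`` usage elsewhere
in these tests suggests)::

    from nltk.sem.glue import GlueFormula
    from nltk.sem.logic import Counter

    def compile_premises(*glue_formulas):
        # One shared counter keeps the index sets {1}, {2}, ... disjoint.
        counter = Counter()
        compiled = []
        for gf in glue_formulas:
            compiled.extend(gf.compile(counter))
        return compiled

    premises = compile_premises(
        GlueFormula('\\P Q.some x.(P(x) and Q(x))', '((gv -o gr) -o ((g -o G) -o G))'),
        GlueFormula('\\x.man(x)', '(gv -o gr)'),
        GlueFormula('\\x.walks(x)', '(g -o f)'),
    )
    for p in premises:
        print(p)
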
---------------------------------
Dependency Graph to Glue Formulas
---------------------------------
    >>> from nltk.corpus.reader.dependency import DependencyGraph

    >>> depgraph = DependencyGraph("""1 John _ NNP NNP _ 2 SUBJ _ _
    ... 2 sees _ VB VB _ 0 ROOT _ _
    ... 3 a _ ex_quant ex_quant _ 4 SPEC _ _
    ... 4 dog _ NN NN _ 2 OBJ _ _
    ... """)
    >>> gfl = GlueDict('nltk:grammars/sample_grammars/glue.semtype').to_glueformula_list(depgraph)
    >>> print(gfl) # doctest: +SKIP
    [\x y.sees(x,y) : (f -o (i -o g)),
     \x.dog(x) : (iv -o ir),
     \P Q.exists x.(P(x) & Q(x)) : ((iv -o ir) -o ((i -o I3) -o I3)),
     \P Q.exists x.(P(x) & Q(x)) : ((fv -o fr) -o ((f -o F4) -o F4)),
     \x.John(x) : (fv -o fr)]
    >>> glue = Glue()
    >>> for r in sorted([r.simplify().normalize() for r in glue.get_readings(glue.gfl_to_compiled(gfl))], key=str):
    ...     print(r)
    exists z1.(John(z1) & exists z2.(dog(z2) & sees(z1,z2)))
    exists z1.(dog(z1) & exists z2.(John(z2) & sees(z2,z1)))

-----------------------------------
Dependency Graph to LFG f-structure
-----------------------------------
    >>> from nltk.sem.lfg import FStructure

    >>> fstruct = FStructure.read_depgraph(depgraph)

    >>> print(fstruct) # doctest: +SKIP
    f:[pred 'sees'
       obj h:[pred 'dog'
              spec 'a']
       subj g:[pred 'John']]

    >>> fstruct.to_depgraph().tree().pprint()
    (sees (dog a) John)

---------------------------------
LFG f-structure to Glue
---------------------------------
    >>> fstruct.to_glueformula_list(GlueDict('nltk:grammars/sample_grammars/glue.semtype')) # doctest: +SKIP
    [\x y.sees(x,y) : (i -o (g -o f)),
     \x.dog(x) : (gv -o gr),
     \P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G3) -o G3)),
     \P Q.exists x.(P(x) & Q(x)) : ((iv -o ir) -o ((i -o I4) -o I4)),
     \x.John(x) : (iv -o ir)]

.. see gluesemantics_malt.doctest for more
lib/python3.10/site-packages/nltk/test/gluesemantics_malt.doctest
ADDED
@@ -0,0 +1,69 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

.. see also: gluesemantics.doctest

==============================================================================
Glue Semantics
==============================================================================

    >>> from nltk.test.gluesemantics_malt_fixt import setup_module
    >>> setup_module()

    >>> from nltk.sem.glue import *
    >>> nltk.sem.logic._counter._value = 0

--------------------------------
Initialize the Dependency Parser
--------------------------------
    >>> from nltk.parse.malt import MaltParser

    >>> tagger = RegexpTagger(
    ...     [('^(John|Mary)$', 'NNP'),
    ...      ('^(sees|chases)$', 'VB'),
    ...      ('^(a)$', 'ex_quant'),
    ...      ('^(every)$', 'univ_quant'),
    ...      ('^(girl|dog)$', 'NN')
    ... ]).tag
    >>> depparser = MaltParser(tagger=tagger)

--------------------
Automated Derivation
--------------------
    >>> glue = Glue(depparser=depparser)
    >>> readings = glue.parse_to_meaning('every girl chases a dog'.split())
    >>> for reading in sorted([r.simplify().normalize() for r in readings], key=str):
    ...     print(reading.normalize())
    all z1.(girl(z1) -> exists z2.(dog(z2) & chases(z1,z2)))
    exists z1.(dog(z1) & all z2.(girl(z2) -> chases(z2,z1)))

    >>> drtglue = DrtGlue(depparser=depparser)
    >>> readings = drtglue.parse_to_meaning('every girl chases a dog'.split())
    >>> for reading in sorted([r.simplify().normalize() for r in readings], key=str):
    ...     print(reading)
    ([],[(([z1],[girl(z1)]) -> ([z2],[dog(z2), chases(z1,z2)]))])
    ([z1],[dog(z1), (([z2],[girl(z2)]) -> ([],[chases(z2,z1)]))])

--------------
With inference
--------------

Checking for equality of two DRSs is very useful when generating readings of a sentence.
For example, the ``glue`` module generates two readings for the sentence
*John sees Mary*:

    >>> from nltk.sem.glue import DrtGlue
    >>> readings = drtglue.parse_to_meaning('John sees Mary'.split())
    >>> for drs in sorted([r.simplify().normalize() for r in readings], key=str):
    ...     print(drs)
    ([z1,z2],[John(z1), Mary(z2), sees(z1,z2)])
    ([z1,z2],[Mary(z1), John(z2), sees(z2,z1)])

However, it is easy to tell that these two readings are logically the
same, and therefore one of them is superfluous. We can use the theorem prover
to determine this equivalence, and then delete one of them. A particular
theorem prover may be specified, or the argument may be left off to use the
default.

    >>> readings[0].equiv(readings[1])
    True
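The pairwise check generalizes to any number of readings: keep a reading only if it
is not equivalent to one already kept. A small sketch (the helper name
``distinct_readings`` is ours; each ``equiv()`` call may invoke an external prover,
so this is not cheap)::

    def distinct_readings(readings):
        kept = []
        for reading in readings:
            # equiv() calls a theorem prover, as demonstrated above.
            if not any(reading.equiv(seen) for seen in kept):
                kept.append(reading)
        return kept
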
lib/python3.10/site-packages/nltk/test/index.doctest
ADDED
@@ -0,0 +1,100 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

.. _align howto: align.html
.. _ccg howto: ccg.html
.. _chat80 howto: chat80.html
.. _childes howto: childes.html
.. _chunk howto: chunk.html
.. _classify howto: classify.html
.. _collocations howto: collocations.html
.. _compat howto: compat.html
.. _corpus howto: corpus.html
.. _data howto: data.html
.. _dependency howto: dependency.html
.. _discourse howto: discourse.html
.. _drt howto: drt.html
.. _featgram howto: featgram.html
.. _featstruct howto: featstruct.html
.. _framenet howto: framenet.html
.. _generate howto: generate.html
.. _gluesemantics howto: gluesemantics.html
.. _gluesemantics_malt howto: gluesemantics_malt.html
.. _grammar howto: grammar.html
.. _grammartestsuites howto: grammartestsuites.html
.. _index howto: index.html
.. _inference howto: inference.html
.. _internals howto: internals.html
.. _japanese howto: japanese.html
.. _logic howto: logic.html
.. _metrics howto: metrics.html
.. _misc howto: misc.html
.. _nonmonotonic howto: nonmonotonic.html
.. _parse howto: parse.html
.. _portuguese_en howto: portuguese_en.html
.. _probability howto: probability.html
.. _propbank howto: propbank.html
.. _relextract howto: relextract.html
.. _resolution howto: resolution.html
.. _semantics howto: semantics.html
.. _simple howto: simple.html
.. _stem howto: stem.html
.. _tag howto: tag.html
.. _tokenize howto: tokenize.html
.. _toolbox howto: toolbox.html
.. _tree howto: tree.html
.. _treetransforms howto: treetransforms.html
.. _util howto: util.html
.. _wordnet howto: wordnet.html
.. _wordnet_lch howto: wordnet_lch.html

===========
NLTK HOWTOs
===========

* `align HOWTO`_
* `ccg HOWTO`_
* `chat80 HOWTO`_
* `childes HOWTO`_
* `chunk HOWTO`_
* `classify HOWTO`_
* `collocations HOWTO`_
* `compat HOWTO`_
* `corpus HOWTO`_
* `data HOWTO`_
* `dependency HOWTO`_
* `discourse HOWTO`_
* `drt HOWTO`_
* `featgram HOWTO`_
* `featstruct HOWTO`_
* `framenet HOWTO`_
* `generate HOWTO`_
* `gluesemantics HOWTO`_
* `gluesemantics_malt HOWTO`_
* `grammar HOWTO`_
* `grammartestsuites HOWTO`_
* `index HOWTO`_
* `inference HOWTO`_
* `internals HOWTO`_
* `japanese HOWTO`_
* `logic HOWTO`_
* `metrics HOWTO`_
* `misc HOWTO`_
* `nonmonotonic HOWTO`_
* `parse HOWTO`_
* `portuguese_en HOWTO`_
* `probability HOWTO`_
* `propbank HOWTO`_
* `relextract HOWTO`_
* `resolution HOWTO`_
* `semantics HOWTO`_
* `simple HOWTO`_
* `stem HOWTO`_
* `tag HOWTO`_
* `tokenize HOWTO`_
* `toolbox HOWTO`_
* `tree HOWTO`_
* `treetransforms HOWTO`_
* `util HOWTO`_
* `wordnet HOWTO`_
* `wordnet_lch HOWTO`_
lib/python3.10/site-packages/nltk/test/inference.doctest
ADDED
@@ -0,0 +1,536 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

====================================
Logical Inference and Model Building
====================================

    >>> from nltk.test.setup_fixt import check_binary
    >>> check_binary('mace4')

    >>> from nltk import *
    >>> from nltk.sem.drt import DrtParser
    >>> from nltk.sem import logic
    >>> logic._counter._value = 0

------------
Introduction
------------

Within the area of automated reasoning, first order theorem proving
and model building (or model generation) have both received much
attention, and have given rise to highly sophisticated techniques. We
focus therefore on providing an NLTK interface to third party tools
for these tasks. In particular, the module ``nltk.inference`` can be
used to access both theorem provers and model builders.

---------------------------------
NLTK Interface to Theorem Provers
---------------------------------

The main class used to interface with a theorem prover is the ``Prover``
class, found in ``nltk.inference.api``. The ``prove()`` method takes three
optional arguments: a goal, a list of assumptions, and a ``verbose`` boolean to
indicate whether the proof should be printed to the console. The proof goal
and any assumptions need to be instances of the ``Expression`` class
specified by ``nltk.sem.logic``. There are currently three theorem provers
included with NLTK: ``Prover9``, ``TableauProver``, and
``ResolutionProver``. The first is an off-the-shelf prover, while the other
two are written in Python and included in the ``nltk.inference`` package.

    >>> from nltk.sem import Expression
    >>> read_expr = Expression.fromstring
    >>> p1 = read_expr('man(socrates)')
    >>> p2 = read_expr('all x.(man(x) -> mortal(x))')
    >>> c = read_expr('mortal(socrates)')
    >>> Prover9().prove(c, [p1,p2])
    True
    >>> TableauProver().prove(c, [p1,p2])
    True
    >>> ResolutionProver().prove(c, [p1,p2], verbose=True)
    [1] {-mortal(socrates)}     A
    [2] {man(socrates)}         A
    [3] {-man(z2), mortal(z2)}  A
    [4] {-man(socrates)}        (1, 3)
    [5] {mortal(socrates)}      (2, 3)
    [6] {}                      (1, 5)
    <BLANKLINE>
    True

---------------------
The ``ProverCommand``
---------------------

A ``ProverCommand`` is a stateful holder for a theorem
prover. The command stores a theorem prover instance (of type ``Prover``),
a goal, a list of assumptions, the result of the proof, and a string version
of the entire proof. Corresponding to the three included ``Prover``
implementations, there are three ``ProverCommand`` implementations:
``Prover9Command``, ``TableauProverCommand``, and
``ResolutionProverCommand``.

The ``ProverCommand``'s constructor takes its goal and assumptions. The
``prove()`` command executes the ``Prover``, and ``proof()``
returns a string form of the proof.
If the ``prove()`` method has not been called,
then the prover command will be unable to display a proof.

    >>> prover = ResolutionProverCommand(c, [p1,p2])
    >>> print(prover.proof())
    Traceback (most recent call last):
      File "...", line 1212, in __run
        compileflags, 1) in test.globs
      File "<doctest nltk/test/inference.doctest[10]>", line 1, in <module>
      File "...", line ..., in proof
        raise LookupError("You have to call prove() first to get a proof!")
    LookupError: You have to call prove() first to get a proof!
    >>> prover.prove()
    True
    >>> print(prover.proof())
    [1] {-mortal(socrates)}     A
    [2] {man(socrates)}         A
    [3] {-man(z4), mortal(z4)}  A
    [4] {-man(socrates)}        (1, 3)
    [5] {mortal(socrates)}      (2, 3)
    [6] {}                      (1, 5)
    <BLANKLINE>

The prover command stores the result of proving so that if ``prove()`` is
called again, then the command can return the result without executing the
prover again. This allows the user to access the result of the proof without
wasting time re-computing what it already knows.

    >>> prover.prove()
    True
    >>> prover.prove()
    True

The assumptions and goal may be accessed using the ``assumptions()`` and
``goal()`` methods, respectively.

    >>> prover.assumptions()
    [<ApplicationExpression man(socrates)>, <AllExpression all x.(man(x) -> mortal(x))>]
    >>> prover.goal()
    <ApplicationExpression mortal(socrates)>

The assumptions list may be modified using the ``add_assumptions()`` and
``retract_assumptions()`` methods. Both methods take a list of ``Expression``
objects. Since adding or removing assumptions may change the result of the
proof, the stored result is cleared when either of these methods is called.
That means that ``proof()`` will be unavailable until ``prove()`` is called and
a call to ``prove()`` will execute the theorem prover.

    >>> prover.retract_assumptions([read_expr('man(socrates)')])
    >>> print(prover.proof())
    Traceback (most recent call last):
      File "...", line 1212, in __run
        compileflags, 1) in test.globs
      File "<doctest nltk/test/inference.doctest[10]>", line 1, in <module>
      File "...", line ..., in proof
        raise LookupError("You have to call prove() first to get a proof!")
    LookupError: You have to call prove() first to get a proof!
    >>> prover.prove()
    False
    >>> print(prover.proof())
    [1] {-mortal(socrates)}     A
    [2] {-man(z6), mortal(z6)}  A
    [3] {-man(socrates)}        (1, 2)
    <BLANKLINE>
    >>> prover.add_assumptions([read_expr('man(socrates)')])
    >>> prover.prove()
    True

-------
Prover9
-------

Prover9 Installation
~~~~~~~~~~~~~~~~~~~~

You can download Prover9 from https://www.cs.unm.edu/~mccune/prover9/.

Extract the source code into a suitable directory and follow the
instructions in the Prover9 ``README.make`` file to compile the executables.
Install these into an appropriate location; the
``prover9_search`` variable is currently configured to look in the
following locations:

    >>> p = Prover9()
    >>> p.binary_locations()
    ['/usr/local/bin/prover9',
     '/usr/local/bin/prover9/bin',
     '/usr/local/bin',
     '/usr/bin',
     '/usr/local/prover9',
     '/usr/local/share/prover9']

Alternatively, the environment variable ``PROVER9HOME`` may be configured with
the binary's location.

The path to the correct directory can be set manually in the following
manner:

    >>> config_prover9(path='/usr/local/bin') # doctest: +SKIP
    [Found prover9: /usr/local/bin/prover9]

If the executables cannot be found, ``Prover9`` will issue a warning message:

    >>> p.prove() # doctest: +SKIP
    Traceback (most recent call last):
      ...
    LookupError:
    ===========================================================================
    NLTK was unable to find the prover9 executable! Use config_prover9() or
    set the PROVER9HOME environment variable.
    <BLANKLINE>
      >> config_prover9('/path/to/prover9')
    <BLANKLINE>
    For more information, on prover9, see:
    <https://www.cs.unm.edu/~mccune/prover9/>
    ===========================================================================


Using Prover9
~~~~~~~~~~~~~

The general case in theorem proving is to determine whether ``S |- g``
holds, where ``S`` is a possibly empty set of assumptions, and ``g``
is a proof goal.

As mentioned earlier, NLTK input to ``Prover9`` must be
``Expression``\ s of ``nltk.sem.logic``. A ``Prover9`` instance is
initialized with a proof goal and, possibly, some assumptions. The
``prove()`` method attempts to find a proof of the goal, given the
list of assumptions (in this case, none).

    >>> goal = read_expr('(man(x) <-> --man(x))')
    >>> prover = Prover9Command(goal)
    >>> prover.prove()
    True

Given a ``ProverCommand`` instance ``prover``, the method
``prover.proof()`` will return a String of the extensive proof information
provided by Prover9, shown in abbreviated form here::

    ============================== Prover9 ===============================
    Prover9 (32) version ...
    Process ... was started by ... on ...
    ...
    The command was ".../prover9 -f ...".
    ============================== end of head ===========================

    ============================== INPUT =================================

    % Reading from file /var/...


    formulas(goals).
    (all x (man(x) -> man(x))).
    end_of_list.

    ...
    ============================== end of search =========================

    THEOREM PROVED

    Exiting with 1 proof.

    Process 6317 exit (max_proofs) Mon Jan 21 15:23:28 2008


As mentioned earlier, we may want to list some assumptions for
the proof, as shown here.

    >>> g = read_expr('mortal(socrates)')
    >>> a1 = read_expr('all x.(man(x) -> mortal(x))')
    >>> prover = Prover9Command(g, assumptions=[a1])
    >>> prover.print_assumptions()
    all x.(man(x) -> mortal(x))

However, the assumptions are not sufficient to derive the goal:

    >>> print(prover.prove())
    False

So let's add another assumption:

    >>> a2 = read_expr('man(socrates)')
    >>> prover.add_assumptions([a2])
    >>> prover.print_assumptions()
    all x.(man(x) -> mortal(x))
    man(socrates)
    >>> print(prover.prove())
    True

We can also show the assumptions in ``Prover9`` format.

    >>> prover.print_assumptions(output_format='Prover9')
    all x (man(x) -> mortal(x))
    man(socrates)

    >>> prover.print_assumptions(output_format='Spass')
    Traceback (most recent call last):
      . . .
    NameError: Unrecognized value for 'output_format': Spass

Assumptions can be retracted from the list of assumptions.

    >>> prover.retract_assumptions([a1])
    >>> prover.print_assumptions()
    man(socrates)
    >>> prover.retract_assumptions([a1])

Statements can be loaded from a file and parsed. We can then add these
statements as new assumptions.

    >>> g = read_expr('all x.(boxer(x) -> -boxerdog(x))')
    >>> prover = Prover9Command(g)
    >>> prover.prove()
    False
    >>> import nltk.data
    >>> new = nltk.data.load('grammars/sample_grammars/background0.fol')
    >>> for a in new:
    ...     print(a)
    all x.(boxerdog(x) -> dog(x))
    all x.(boxer(x) -> person(x))
    all x.-(dog(x) & person(x))
    exists x.boxer(x)
    exists x.boxerdog(x)
    >>> prover.add_assumptions(new)
    >>> print(prover.prove())
    True
    >>> print(prover.proof())
    ============================== prooftrans ============================
    Prover9 (...) version ...
    Process ... was started by ... on ...
    ...
    The command was ".../prover9".
    ============================== end of head ===========================
    <BLANKLINE>
    ============================== end of input ==========================
    <BLANKLINE>
    ============================== PROOF =================================
    <BLANKLINE>
    % -------- Comments from original proof --------
    % Proof 1 at ... seconds.
    % Length of proof is 13.
    % Level of proof is 4.
    % Maximum clause weight is 0.
    % Given clauses 0.
    <BLANKLINE>
    1 (all x (boxerdog(x) -> dog(x))).  [assumption].
    2 (all x (boxer(x) -> person(x))).  [assumption].
    3 (all x -(dog(x) & person(x))).  [assumption].
    6 (all x (boxer(x) -> -boxerdog(x))).  [goal].
    8 -boxerdog(x) | dog(x).  [clausify(1)].
    9 boxerdog(c3).  [deny(6)].
    11 -boxer(x) | person(x).  [clausify(2)].
    12 boxer(c3).  [deny(6)].
    14 -dog(x) | -person(x).  [clausify(3)].
    15 dog(c3).  [resolve(9,a,8,a)].
    18 person(c3).  [resolve(12,a,11,a)].
    19 -person(c3).  [resolve(15,a,14,a)].
    20 $F.  [resolve(19,a,18,a)].
    <BLANKLINE>
    ============================== end of proof ==========================

----------------------
The equiv() method
----------------------

One application of the theorem prover functionality is to check if
two Expressions have the same meaning.
The ``equiv()`` method calls a theorem prover to determine whether two
Expressions are logically equivalent.

    >>> a = read_expr(r'exists x.(man(x) & walks(x))')
    >>> b = read_expr(r'exists x.(walks(x) & man(x))')
    >>> print(a.equiv(b))
    True

The same method can be used on Discourse Representation Structures (DRSs).
In this case, each DRS is converted to a first order logic form, and then
passed to the theorem prover.

    >>> dp = DrtParser()
    >>> a = dp.parse(r'([x],[man(x), walks(x)])')
    >>> b = dp.parse(r'([x],[walks(x), man(x)])')
    >>> print(a.equiv(b))
    True


--------------------------------
NLTK Interface to Model Builders
--------------------------------

The top-level interface to model builders is parallel to that for
theorem provers. The ``ModelBuilder`` interface is located
in ``nltk.inference.api``. It is currently only implemented by
``Mace``, which interfaces with the Mace4 model builder.

Typically we use a model builder to show that some set of formulas has
a model, and is therefore consistent. One way of doing this is by
treating our candidate set of sentences as assumptions, and leaving
the goal unspecified.
Thus, the following interaction shows how both ``{a, c1}`` and ``{a, c2}``
are consistent sets, since Mace succeeds in building a
model for each of them, while ``{c1, c2}`` is inconsistent.

    >>> a3 = read_expr('exists x.(man(x) and walks(x))')
    >>> c1 = read_expr('mortal(socrates)')
    >>> c2 = read_expr('-mortal(socrates)')
    >>> mace = Mace()
    >>> print(mace.build_model(None, [a3, c1]))
    True
    >>> print(mace.build_model(None, [a3, c2]))
    True

We can also use the model builder as an adjunct to the theorem prover.
Let's suppose we are trying to prove ``S |- g``, i.e. that ``g``
is logically entailed by assumptions ``S = {s1, s2, ..., sn}``.
We can give this same input to Mace4, and the model builder will try to
find a counterexample, that is, to show that ``g`` does *not* follow
from ``S``. So, given this input, Mace4 will try to find a model for
the set ``S' = {s1, s2, ..., sn, (not g)}``. If ``g`` fails to follow
from ``S``, then Mace4 may well return with a counterexample faster
than Prover9 concludes that it cannot find the required proof.
Conversely, if ``g`` *is* provable from ``S``, Mace4 may take a long
time unsuccessfully trying to find a countermodel, and will eventually give up.

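Nothing in the interfaces above forces a choice between the two tools: since
``prove()`` and ``build_model()`` are ordinary method calls, they can simply be run
side by side on the same input. A minimal sketch with Python threads, using only
the classes demonstrated in this HOWTO (the helper name ``decide`` is ours)::

    from threading import Thread

    from nltk import Mace, Prover9

    def decide(goal, assumptions):
        # Run the prover and the model builder on the same input. If the
        # prover returns True, the goal follows from the assumptions; if the
        # model builder returns True, its model is a counterexample.
        outcome = {}
        jobs = [
            Thread(target=lambda: outcome.setdefault(
                'proved', Prover9().prove(goal, assumptions))),
            Thread(target=lambda: outcome.setdefault(
                'countermodel', Mace(end_size=50).build_model(goal, assumptions))),
        ]
        for j in jobs:
            j.start()
        for j in jobs:
            j.join()
        return outcome
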
In the following example, we see that the model builder does succeed
in building a model of the assumptions together with the negation of
the goal. That is, it succeeds in finding a model
where there is a woman that every man loves; Adam is a man; Eve is a
woman; but Adam does not love Eve.

    >>> a4 = read_expr('exists y. (woman(y) & all x. (man(x) -> love(x,y)))')
    >>> a5 = read_expr('man(adam)')
    >>> a6 = read_expr('woman(eve)')
    >>> g = read_expr('love(adam,eve)')
    >>> print(mace.build_model(g, [a4, a5, a6]))
    True

The Model Builder will fail to find a model if the assumptions do entail
the goal. Mace will continue to look for models of ever-increasing sizes
until the end_size number is reached. By default, end_size is 500,
but it can be set manually for quicker response time.

    >>> a7 = read_expr('all x.(man(x) -> mortal(x))')
    >>> a8 = read_expr('man(socrates)')
    >>> g2 = read_expr('mortal(socrates)')
    >>> print(Mace(end_size=50).build_model(g2, [a7, a8]))
    False

There is also a ``ModelBuilderCommand`` class that, like ``ProverCommand``,
stores a ``ModelBuilder``, a goal, assumptions, a result, and a model. The
only implementation in NLTK is ``MaceCommand``.


-----
Mace4
-----

Mace4 Installation
~~~~~~~~~~~~~~~~~~

Mace4 is packaged with Prover9, and can be downloaded from the same
source, namely https://www.cs.unm.edu/~mccune/prover9/. It is installed
in the same manner as Prover9.

Using Mace4
~~~~~~~~~~~

Check whether Mace4 can find a model.

    >>> a = read_expr('(see(mary,john) & -(mary = john))')
    >>> mb = MaceCommand(assumptions=[a])
    >>> mb.build_model()
    True

Show the model in 'tabular' format.

    >>> print(mb.model(format='tabular'))
    % number = 1
    % seconds = 0
    <BLANKLINE>
    % Interpretation of size 2
    <BLANKLINE>
     john : 0
    <BLANKLINE>
     mary : 1
    <BLANKLINE>
     see :
           | 0 1
        ---+----
         0 | 0 0
         1 | 1 0
    <BLANKLINE>

Show the model in 'cooked' format.

    >>> print(mb.model(format='cooked'))
    % number = 1
    % seconds = 0
    <BLANKLINE>
    % Interpretation of size 2
    <BLANKLINE>
    john = 0.
    <BLANKLINE>
    mary = 1.
    <BLANKLINE>
    - see(0,0).
    - see(0,1).
    see(1,0).
    - see(1,1).
    <BLANKLINE>

The property ``valuation`` accesses the stored ``Valuation``.

    >>> print(mb.valuation)
    {'john': 'a', 'mary': 'b', 'see': {('b', 'a')}}

We can return to our earlier example and inspect the model:

    >>> mb = MaceCommand(g, assumptions=[a4, a5, a6])
    >>> m = mb.build_model()
    >>> print(mb.model(format='cooked'))
    % number = 1
    % seconds = 0
    <BLANKLINE>
    % Interpretation of size 2
    <BLANKLINE>
    adam = 0.
    <BLANKLINE>
    eve = 0.
    <BLANKLINE>
    c1 = 1.
    <BLANKLINE>
    man(0).
    - man(1).
    <BLANKLINE>
    woman(0).
    woman(1).
    <BLANKLINE>
    - love(0,0).
    love(0,1).
    - love(1,0).
    - love(1,1).
    <BLANKLINE>

Here, we can see that ``adam`` and ``eve`` have been assigned the same
individual, namely ``0`` as value; ``0`` is both a man and a woman; a second
individual ``1`` is also a woman; and ``0`` loves ``1``. Thus, this is
an interpretation in which there is a woman that every man loves but
Adam doesn't love Eve.

Mace can also be used with propositional logic.

    >>> p = read_expr('P')
    >>> q = read_expr('Q')
    >>> mb = MaceCommand(q, [p, p>-q])
    >>> mb.build_model()
    True
    >>> mb.valuation['P']
    True
    >>> mb.valuation['Q']
    False
lib/python3.10/site-packages/nltk/test/internals.doctest
ADDED
@@ -0,0 +1,161 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==========================================
 Unit tests for the nltk.internals module
==========================================

overridden()
~~~~~~~~~~~~
    >>> from nltk.internals import overridden

The typical use case is in defining methods for an interface or
abstract base class, in such a way that subclasses don't have to
implement all of the methods:

    >>> class EaterI(object):
    ...     '''Subclass must define eat() or batch_eat().'''
    ...     def eat(self, food):
    ...         if overridden(self.batch_eat):
    ...             return self.batch_eat([food])[0]
    ...         else:
    ...             raise NotImplementedError()
    ...     def batch_eat(self, foods):
    ...         return [self.eat(food) for food in foods]

As long as a subclass implements one method, it will be used to
perform the other method:

    >>> class GoodEater1(EaterI):
    ...     def eat(self, food):
    ...         return 'yum'
    >>> GoodEater1().eat('steak')
    'yum'
    >>> GoodEater1().batch_eat(['steak', 'peas'])
    ['yum', 'yum']

    >>> class GoodEater2(EaterI):
    ...     def batch_eat(self, foods):
    ...         return ['yum' for food in foods]
    >>> GoodEater2().eat('steak')
    'yum'
    >>> GoodEater2().batch_eat(['steak', 'peas'])
    ['yum', 'yum']

But if a subclass doesn't implement either one, it will get an
error when either is called (n.b. this is better than infinite
recursion):

    >>> class BadEater1(EaterI):
    ...     pass
    >>> BadEater1().eat('steak')
    Traceback (most recent call last):
      . . .
    NotImplementedError
    >>> BadEater1().batch_eat(['steak', 'peas'])
    Traceback (most recent call last):
      . . .
    NotImplementedError

Trying to use the abstract base class itself will also result in an
error:

    >>> class EaterI(EaterI):
    ...     pass
    >>> EaterI().eat('steak')
    Traceback (most recent call last):
      . . .
    NotImplementedError
    >>> EaterI().batch_eat(['steak', 'peas'])
    Traceback (most recent call last):
      . . .
    NotImplementedError

It's ok to use intermediate abstract classes:

    >>> class AbstractEater(EaterI):
    ...     pass

    >>> class GoodEater3(AbstractEater):
    ...     def eat(self, food):
    ...         return 'yum'
    ...
    >>> GoodEater3().eat('steak')
    'yum'
    >>> GoodEater3().batch_eat(['steak', 'peas'])
    ['yum', 'yum']

    >>> class GoodEater4(AbstractEater):
    ...     def batch_eat(self, foods):
    ...         return ['yum' for food in foods]
    >>> GoodEater4().eat('steak')
    'yum'
    >>> GoodEater4().batch_eat(['steak', 'peas'])
    ['yum', 'yum']

    >>> class BadEater2(AbstractEater):
    ...     pass
    >>> BadEater2().eat('steak')
    Traceback (most recent call last):
      . . .
    NotImplementedError
    >>> BadEater2().batch_eat(['steak', 'peas'])
    Traceback (most recent call last):
      . . .
    NotImplementedError

Here are some extra tests:

>>> class A(object):
|
110 |
+
... def f(x): pass
|
111 |
+
>>> class B(A):
|
112 |
+
... def f(x): pass
|
113 |
+
>>> class C(A): pass
|
114 |
+
>>> class D(B): pass
|
115 |
+
|
116 |
+
>>> overridden(A().f)
|
117 |
+
False
|
118 |
+
>>> overridden(B().f)
|
119 |
+
True
|
120 |
+
>>> overridden(C().f)
|
121 |
+
False
|
122 |
+
>>> overridden(D().f)
|
123 |
+
True
|
124 |
+
|
125 |
+
It works for classic classes, too:
|
126 |
+
|
127 |
+
>>> class A:
|
128 |
+
... def f(x): pass
|
129 |
+
>>> class B(A):
|
130 |
+
... def f(x): pass
|
131 |
+
>>> class C(A): pass
|
132 |
+
>>> class D(B): pass
|
133 |
+
>>> overridden(A().f)
|
134 |
+
False
|
135 |
+
>>> overridden(B().f)
|
136 |
+
True
|
137 |
+
>>> overridden(C().f)
|
138 |
+
False
|
139 |
+
>>> overridden(D().f)
|
140 |
+
True
|
141 |
+
|
142 |
+
|
143 |
+
read_str()
|
144 |
+
~~~~~~~~~~~~
|
145 |
+
>>> from nltk.internals import read_str
|
146 |
+
|
147 |
+
Test valid scenarios
|
148 |
+
|
149 |
+
>>> read_str("'valid string'", 0)
|
150 |
+
('valid string', 14)
|
151 |
+
|
152 |
+
Now test invalid scenarios
|
153 |
+
|
154 |
+
>>> read_str("should error", 0)
|
155 |
+
Traceback (most recent call last):
|
156 |
+
...
|
157 |
+
nltk.internals.ReadError: Expected open quote at 0
|
158 |
+
>>> read_str("'should error", 0)
|
159 |
+
Traceback (most recent call last):
|
160 |
+
...
|
161 |
+
nltk.internals.ReadError: Expected close quote at 1
|
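The second element of the returned tuple is the index just past the closing
quote, so parsing can resume from there. An illustrative check (added here,
not part of the original suite), reading from a nonzero offset:

>>> read_str("x = 'abc'", 4)
('abc', 9)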
lib/python3.10/site-packages/nltk/test/logic.doctest
ADDED
@@ -0,0 +1,1096 @@
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
=======================
|
5 |
+
Logic & Lambda Calculus
|
6 |
+
=======================
|
7 |
+
|
8 |
+
The `nltk.sem.logic` module allows expressions of First-Order Logic (FOL) to be
|
9 |
+
parsed into ``Expression`` objects. In addition to FOL, the parser
|
10 |
+
handles lambda-abstraction with variables of higher order.
|
11 |
+
|
12 |
+
--------
|
13 |
+
Overview
|
14 |
+
--------
|
15 |
+
|
16 |
+
>>> from nltk.sem.logic import *
|
17 |
+
|
18 |
+
The default inventory of logical constants is the following:
|
19 |
+
|
20 |
+
>>> boolean_ops()
|
21 |
+
negation -
|
22 |
+
conjunction &
|
23 |
+
disjunction |
|
24 |
+
implication ->
|
25 |
+
equivalence <->
|
26 |
+
>>> equality_preds()
|
27 |
+
equality =
|
28 |
+
inequality !=
|
29 |
+
>>> binding_ops()
|
30 |
+
existential exists
|
31 |
+
universal all
|
32 |
+
lambda \
|
33 |
+
|
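A quick illustration (an added sketch, not from the original file) combining
several of these constants in a single parsed expression:

>>> print(Expression.fromstring(r'all x.(P(x) -> -Q(x))'))
all x.(P(x) -> -Q(x))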
34 |
+
----------------
|
35 |
+
Regression Tests
|
36 |
+
----------------
|
37 |
+
|
38 |
+
|
39 |
+
Untyped Logic
|
40 |
+
+++++++++++++
|
41 |
+
|
42 |
+
Process logical expressions conveniently:
|
43 |
+
|
44 |
+
>>> read_expr = Expression.fromstring
|
45 |
+
|
46 |
+
Test for equality under alpha-conversion
|
47 |
+
========================================
|
48 |
+
|
49 |
+
>>> e1 = read_expr('exists x.P(x)')
|
50 |
+
>>> print(e1)
|
51 |
+
exists x.P(x)
|
52 |
+
>>> e2 = e1.alpha_convert(Variable('z'))
|
53 |
+
>>> print(e2)
|
54 |
+
exists z.P(z)
|
55 |
+
>>> e1 == e2
|
56 |
+
True
|
57 |
+
|
58 |
+
|
59 |
+
>>> l = read_expr(r'\X.\X.X(X)(1)').simplify()
|
60 |
+
>>> id = read_expr(r'\X.X(X)')
|
61 |
+
>>> l == id
|
62 |
+
True
|
63 |
+
|
64 |
+
Test numerals
|
65 |
+
=============
|
66 |
+
|
67 |
+
>>> zero = read_expr(r'\F x.x')
|
68 |
+
>>> one = read_expr(r'\F x.F(x)')
|
69 |
+
>>> two = read_expr(r'\F x.F(F(x))')
|
70 |
+
>>> three = read_expr(r'\F x.F(F(F(x)))')
|
71 |
+
>>> four = read_expr(r'\F x.F(F(F(F(x))))')
|
72 |
+
>>> succ = read_expr(r'\N F x.F(N(F,x))')
|
73 |
+
>>> plus = read_expr(r'\M N F x.M(F,N(F,x))')
|
74 |
+
>>> mult = read_expr(r'\M N F.M(N(F))')
|
75 |
+
>>> pred = read_expr(r'\N F x.(N(\G H.H(G(F)))(\u.x)(\u.u))')
|
76 |
+
>>> v1 = ApplicationExpression(succ, zero).simplify()
|
77 |
+
>>> v1 == one
|
78 |
+
True
|
79 |
+
>>> v2 = ApplicationExpression(succ, v1).simplify()
|
80 |
+
>>> v2 == two
|
81 |
+
True
|
82 |
+
>>> v3 = ApplicationExpression(ApplicationExpression(plus, v1), v2).simplify()
|
83 |
+
>>> v3 == three
|
84 |
+
True
|
85 |
+
>>> v4 = ApplicationExpression(ApplicationExpression(mult, v2), v2).simplify()
|
86 |
+
>>> v4 == four
|
87 |
+
True
|
88 |
+
>>> v5 = ApplicationExpression(pred, ApplicationExpression(pred, v4)).simplify()
|
89 |
+
>>> v5 == two
|
90 |
+
True
|
91 |
+
|
92 |
+
Overloaded operators also exist, for convenience.
|
93 |
+
|
94 |
+
>>> print(succ(zero).simplify() == one)
|
95 |
+
True
|
96 |
+
>>> print(plus(one,two).simplify() == three)
|
97 |
+
True
|
98 |
+
>>> print(mult(two,two).simplify() == four)
|
99 |
+
True
|
100 |
+
>>> print(pred(pred(four)).simplify() == two)
|
101 |
+
True
|
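Other Church-encoded arithmetic can be defined in the same style. As an added
sketch (``exp`` is not defined in the original file), exponentiation is just
reversed application, so 2 to the power 2 reduces to the numeral four:

>>> exp = read_expr(r'\M N.N(M)')
>>> print(exp(two, two).simplify() == four)
True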
102 |
+
|
103 |
+
>>> john = read_expr(r'john')
|
104 |
+
>>> man = read_expr(r'\x.man(x)')
|
105 |
+
>>> walk = read_expr(r'\x.walk(x)')
|
106 |
+
>>> man(john).simplify()
|
107 |
+
<ApplicationExpression man(john)>
|
108 |
+
>>> print(-walk(john).simplify())
|
109 |
+
-walk(john)
|
110 |
+
>>> print((man(john) & walk(john)).simplify())
|
111 |
+
(man(john) & walk(john))
|
112 |
+
>>> print((man(john) | walk(john)).simplify())
|
113 |
+
(man(john) | walk(john))
|
114 |
+
>>> print((man(john) > walk(john)).simplify())
|
115 |
+
(man(john) -> walk(john))
|
116 |
+
>>> print((man(john) < walk(john)).simplify())
|
117 |
+
(man(john) <-> walk(john))
|
118 |
+
|
119 |
+
Python's built-in lambda operator can also be used with Expressions
|
120 |
+
|
121 |
+
>>> john = VariableExpression(Variable('john'))
|
122 |
+
>>> run_var = VariableExpression(Variable('run'))
|
123 |
+
>>> run = lambda x: run_var(x)
|
124 |
+
>>> run(john)
|
125 |
+
<ApplicationExpression run(john)>
|
126 |
+
|
127 |
+
|
128 |
+
``betaConversionTestSuite.pl``
|
129 |
+
------------------------------
|
130 |
+
|
131 |
+
Tests based on Blackburn & Bos' book, *Representation and Inference
|
132 |
+
for Natural Language*.
|
133 |
+
|
134 |
+
>>> x1 = read_expr(r'\P.P(mia)(\x.walk(x))').simplify()
|
135 |
+
>>> x2 = read_expr(r'walk(mia)').simplify()
|
136 |
+
>>> x1 == x2
|
137 |
+
True
|
138 |
+
|
139 |
+
>>> x1 = read_expr(r'exists x.(man(x) & ((\P.exists x.(woman(x) & P(x)))(\y.love(x,y))))').simplify()
|
140 |
+
>>> x2 = read_expr(r'exists x.(man(x) & exists y.(woman(y) & love(x,y)))').simplify()
|
141 |
+
>>> x1 == x2
|
142 |
+
True
|
143 |
+
>>> x1 = read_expr(r'\a.sleep(a)(mia)').simplify()
|
144 |
+
>>> x2 = read_expr(r'sleep(mia)').simplify()
|
145 |
+
>>> x1 == x2
|
146 |
+
True
|
147 |
+
>>> x1 = read_expr(r'\a.\b.like(b,a)(mia)').simplify()
|
148 |
+
>>> x2 = read_expr(r'\b.like(b,mia)').simplify()
|
149 |
+
>>> x1 == x2
|
150 |
+
True
|
151 |
+
>>> x1 = read_expr(r'\a.(\b.like(b,a)(vincent))').simplify()
|
152 |
+
>>> x2 = read_expr(r'\a.like(vincent,a)').simplify()
|
153 |
+
>>> x1 == x2
|
154 |
+
True
|
155 |
+
>>> x1 = read_expr(r'\a.((\b.like(b,a)(vincent)) & sleep(a))').simplify()
|
156 |
+
>>> x2 = read_expr(r'\a.(like(vincent,a) & sleep(a))').simplify()
|
157 |
+
>>> x1 == x2
|
158 |
+
True
|
159 |
+
|
160 |
+
>>> x1 = read_expr(r'(\a.\b.like(b,a)(mia)(vincent))').simplify()
|
161 |
+
>>> x2 = read_expr(r'like(vincent,mia)').simplify()
|
162 |
+
>>> x1 == x2
|
163 |
+
True
|
164 |
+
|
165 |
+
>>> x1 = read_expr(r'P((\a.sleep(a)(vincent)))').simplify()
|
166 |
+
>>> x2 = read_expr(r'P(sleep(vincent))').simplify()
|
167 |
+
>>> x1 == x2
|
168 |
+
True
|
169 |
+
|
170 |
+
>>> x1 = read_expr(r'\A.A((\b.sleep(b)(vincent)))').simplify()
|
171 |
+
>>> x2 = read_expr(r'\A.A(sleep(vincent))').simplify()
|
172 |
+
>>> x1 == x2
|
173 |
+
True
|
174 |
+
|
175 |
+
>>> x1 = read_expr(r'\A.A(sleep(vincent))').simplify()
|
176 |
+
>>> x2 = read_expr(r'\A.A(sleep(vincent))').simplify()
|
177 |
+
>>> x1 == x2
|
178 |
+
True
|
179 |
+
|
180 |
+
>>> x1 = read_expr(r'(\A.A(vincent)(\b.sleep(b)))').simplify()
|
181 |
+
>>> x2 = read_expr(r'sleep(vincent)').simplify()
|
182 |
+
>>> x1 == x2
|
183 |
+
True
|
184 |
+
|
185 |
+
>>> x1 = read_expr(r'\A.believe(mia,A(vincent))(\b.sleep(b))').simplify()
|
186 |
+
>>> x2 = read_expr(r'believe(mia,sleep(vincent))').simplify()
|
187 |
+
>>> x1 == x2
|
188 |
+
True
|
189 |
+
|
190 |
+
>>> x1 = read_expr(r'(\A.(A(vincent) & A(mia)))(\b.sleep(b))').simplify()
|
191 |
+
>>> x2 = read_expr(r'(sleep(vincent) & sleep(mia))').simplify()
|
192 |
+
>>> x1 == x2
|
193 |
+
True
|
194 |
+
|
195 |
+
>>> x1 = read_expr(r'\A.\B.(\C.C(A(vincent))(\d.probably(d)) & (\C.C(B(mia))(\d.improbably(d))))(\f.walk(f))(\f.talk(f))').simplify()
|
196 |
+
>>> x2 = read_expr(r'(probably(walk(vincent)) & improbably(talk(mia)))').simplify()
|
197 |
+
>>> x1 == x2
|
198 |
+
True
|
199 |
+
|
200 |
+
>>> x1 = read_expr(r'(\a.\b.(\C.C(a,b)(\d.\f.love(d,f))))(jules)(mia)').simplify()
|
201 |
+
>>> x2 = read_expr(r'love(jules,mia)').simplify()
|
202 |
+
>>> x1 == x2
|
203 |
+
True
|
204 |
+
|
205 |
+
>>> x1 = read_expr(r'(\A.\B.exists c.(A(c) & B(c)))(\d.boxer(d),\d.sleep(d))').simplify()
|
206 |
+
>>> x2 = read_expr(r'exists c.(boxer(c) & sleep(c))').simplify()
|
207 |
+
>>> x1 == x2
|
208 |
+
True
|
209 |
+
|
210 |
+
>>> x1 = read_expr(r'\A.Z(A)(\c.\a.like(a,c))').simplify()
|
211 |
+
>>> x2 = read_expr(r'Z(\c.\a.like(a,c))').simplify()
|
212 |
+
>>> x1 == x2
|
213 |
+
True
|
214 |
+
|
215 |
+
>>> x1 = read_expr(r'\A.\b.A(b)(\c.\b.like(b,c))').simplify()
|
216 |
+
>>> x2 = read_expr(r'\b.(\c.\b.like(b,c)(b))').simplify()
|
217 |
+
>>> x1 == x2
|
218 |
+
True
|
219 |
+
|
220 |
+
>>> x1 = read_expr(r'(\a.\b.(\C.C(a,b)(\b.\a.loves(b,a))))(jules)(mia)').simplify()
|
221 |
+
>>> x2 = read_expr(r'loves(jules,mia)').simplify()
|
222 |
+
>>> x1 == x2
|
223 |
+
True
|
224 |
+
|
225 |
+
>>> x1 = read_expr(r'(\A.\b.(exists b.A(b) & A(b)))(\c.boxer(c))(vincent)').simplify()
|
226 |
+
>>> x2 = read_expr(r'((exists b.boxer(b)) & boxer(vincent))').simplify()
|
227 |
+
>>> x1 == x2
|
228 |
+
True
|
229 |
+
|
230 |
+
Test Parser
|
231 |
+
===========
|
232 |
+
|
233 |
+
>>> print(read_expr(r'john'))
|
234 |
+
john
|
235 |
+
>>> print(read_expr(r'x'))
|
236 |
+
x
|
237 |
+
>>> print(read_expr(r'-man(x)'))
|
238 |
+
-man(x)
|
239 |
+
>>> print(read_expr(r'--man(x)'))
|
240 |
+
--man(x)
|
241 |
+
>>> print(read_expr(r'(man(x))'))
|
242 |
+
man(x)
|
243 |
+
>>> print(read_expr(r'((man(x)))'))
|
244 |
+
man(x)
|
245 |
+
>>> print(read_expr(r'man(x) <-> tall(x)'))
|
246 |
+
(man(x) <-> tall(x))
|
247 |
+
>>> print(read_expr(r'(man(x) <-> tall(x))'))
|
248 |
+
(man(x) <-> tall(x))
|
249 |
+
>>> print(read_expr(r'(man(x) & tall(x) & walks(x))'))
|
250 |
+
(man(x) & tall(x) & walks(x))
|
251 |
+
>>> print(read_expr(r'(man(x) & tall(x) & walks(x))').first)
|
252 |
+
(man(x) & tall(x))
|
253 |
+
>>> print(read_expr(r'man(x) | tall(x) & walks(x)'))
|
254 |
+
(man(x) | (tall(x) & walks(x)))
|
255 |
+
>>> print(read_expr(r'((man(x) & tall(x)) | walks(x))'))
|
256 |
+
((man(x) & tall(x)) | walks(x))
|
257 |
+
>>> print(read_expr(r'man(x) & (tall(x) | walks(x))'))
|
258 |
+
(man(x) & (tall(x) | walks(x)))
|
259 |
+
>>> print(read_expr(r'(man(x) & (tall(x) | walks(x)))'))
|
260 |
+
(man(x) & (tall(x) | walks(x)))
|
261 |
+
>>> print(read_expr(r'P(x) -> Q(x) <-> R(x) | S(x) & T(x)'))
|
262 |
+
((P(x) -> Q(x)) <-> (R(x) | (S(x) & T(x))))
|
263 |
+
>>> print(read_expr(r'exists x.man(x)'))
|
264 |
+
exists x.man(x)
|
265 |
+
>>> print(read_expr(r'exists x.(man(x) & tall(x))'))
|
266 |
+
exists x.(man(x) & tall(x))
|
267 |
+
>>> print(read_expr(r'exists x.(man(x) & tall(x) & walks(x))'))
|
268 |
+
exists x.(man(x) & tall(x) & walks(x))
|
269 |
+
>>> print(read_expr(r'-P(x) & Q(x)'))
|
270 |
+
(-P(x) & Q(x))
|
271 |
+
>>> read_expr(r'-P(x) & Q(x)') == read_expr(r'(-P(x)) & Q(x)')
|
272 |
+
True
|
273 |
+
>>> print(read_expr(r'\x.man(x)'))
|
274 |
+
\x.man(x)
|
275 |
+
>>> print(read_expr(r'\x.man(x)(john)'))
|
276 |
+
\x.man(x)(john)
|
277 |
+
>>> print(read_expr(r'\x.man(x)(john) & tall(x)'))
|
278 |
+
(\x.man(x)(john) & tall(x))
|
279 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)'))
|
280 |
+
\x y.sees(x,y)
|
281 |
+
>>> print(read_expr(r'\x y.sees(x,y)'))
|
282 |
+
\x y.sees(x,y)
|
283 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(a)'))
|
284 |
+
(\x y.sees(x,y))(a)
|
285 |
+
>>> print(read_expr(r'\x y.sees(x,y)(a)'))
|
286 |
+
(\x y.sees(x,y))(a)
|
287 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(a)(b)'))
|
288 |
+
((\x y.sees(x,y))(a))(b)
|
289 |
+
>>> print(read_expr(r'\x y.sees(x,y)(a)(b)'))
|
290 |
+
((\x y.sees(x,y))(a))(b)
|
291 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(a,b)'))
|
292 |
+
((\x y.sees(x,y))(a))(b)
|
293 |
+
>>> print(read_expr(r'\x y.sees(x,y)(a,b)'))
|
294 |
+
((\x y.sees(x,y))(a))(b)
|
295 |
+
>>> print(read_expr(r'((\x.\y.sees(x,y))(a))(b)'))
|
296 |
+
((\x y.sees(x,y))(a))(b)
|
297 |
+
>>> print(read_expr(r'P(x)(y)(z)'))
|
298 |
+
P(x,y,z)
|
299 |
+
>>> print(read_expr(r'P(Q)'))
|
300 |
+
P(Q)
|
301 |
+
>>> print(read_expr(r'P(Q(x))'))
|
302 |
+
P(Q(x))
|
303 |
+
>>> print(read_expr(r'(\x.exists y.walks(x,y))(x)'))
|
304 |
+
(\x.exists y.walks(x,y))(x)
|
305 |
+
>>> print(read_expr(r'exists x.(x = john)'))
|
306 |
+
exists x.(x = john)
|
307 |
+
>>> print(read_expr(r'((\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))'))
|
308 |
+
((\P Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))
|
309 |
+
>>> a = read_expr(r'exists c.exists b.A(b,c) & A(b,c)')
|
310 |
+
>>> b = read_expr(r'(exists c.(exists b.A(b,c))) & A(b,c)')
|
311 |
+
>>> print(a == b)
|
312 |
+
True
|
313 |
+
>>> a = read_expr(r'exists c.(exists b.A(b,c) & A(b,c))')
|
314 |
+
>>> b = read_expr(r'exists c.((exists b.A(b,c)) & A(b,c))')
|
315 |
+
>>> print(a == b)
|
316 |
+
True
|
317 |
+
>>> print(read_expr(r'exists x.x = y'))
|
318 |
+
exists x.(x = y)
|
319 |
+
>>> print(read_expr('A(B)(C)'))
|
320 |
+
A(B,C)
|
321 |
+
>>> print(read_expr('(A(B))(C)'))
|
322 |
+
A(B,C)
|
323 |
+
>>> print(read_expr('A((B)(C))'))
|
324 |
+
A(B(C))
|
325 |
+
>>> print(read_expr('A(B(C))'))
|
326 |
+
A(B(C))
|
327 |
+
>>> print(read_expr('(A)(B(C))'))
|
328 |
+
A(B(C))
|
329 |
+
>>> print(read_expr('(((A)))(((B))(((C))))'))
|
330 |
+
A(B(C))
|
331 |
+
>>> print(read_expr(r'A != B'))
|
332 |
+
-(A = B)
|
333 |
+
>>> print(read_expr('P(x) & x=y & P(y)'))
|
334 |
+
(P(x) & (x = y) & P(y))
|
335 |
+
>>> try: print(read_expr(r'\walk.walk(x)'))
|
336 |
+
... except LogicalExpressionException as e: print(e)
|
337 |
+
'walk' is an illegal variable name. Constants may not be abstracted.
|
338 |
+
\walk.walk(x)
|
339 |
+
^
|
340 |
+
>>> try: print(read_expr(r'all walk.walk(john)'))
|
341 |
+
... except LogicalExpressionException as e: print(e)
|
342 |
+
'walk' is an illegal variable name. Constants may not be quantified.
|
343 |
+
all walk.walk(john)
|
344 |
+
^
|
345 |
+
>>> try: print(read_expr(r'x(john)'))
|
346 |
+
... except LogicalExpressionException as e: print(e)
|
347 |
+
'x' is an illegal predicate name. Individual variables may not be used as predicates.
|
348 |
+
x(john)
|
349 |
+
^
|
350 |
+
|
351 |
+
>>> from nltk.sem.logic import LogicParser # hack to give access to custom quote chars
|
352 |
+
>>> lpq = LogicParser()
|
353 |
+
>>> lpq.quote_chars = [("'", "'", "\\", False)]
|
354 |
+
>>> print(lpq.parse(r"(man(x) & 'tall\'s,' (x) & walks (x) )"))
|
355 |
+
(man(x) & tall's,(x) & walks(x))
|
356 |
+
>>> lpq.quote_chars = [("'", "'", "\\", True)]
|
357 |
+
>>> print(lpq.parse(r"'tall\'s,'"))
|
358 |
+
'tall\'s,'
|
359 |
+
>>> print(lpq.parse(r"'spaced name(x)'"))
|
360 |
+
'spaced name(x)'
|
361 |
+
>>> print(lpq.parse(r"-'tall\'s,'(x)"))
|
362 |
+
-'tall\'s,'(x)
|
363 |
+
>>> print(lpq.parse(r"(man(x) & 'tall\'s,' (x) & walks (x) )"))
|
364 |
+
(man(x) & 'tall\'s,'(x) & walks(x))
|
365 |
+
|
366 |
+
|
367 |
+
Simplify
|
368 |
+
========
|
369 |
+
|
370 |
+
>>> print(read_expr(r'\x.man(x)(john)').simplify())
|
371 |
+
man(john)
|
372 |
+
>>> print(read_expr(r'\x.((man(x)))(john)').simplify())
|
373 |
+
man(john)
|
374 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(john, mary)').simplify())
|
375 |
+
sees(john,mary)
|
376 |
+
>>> print(read_expr(r'\x y.sees(x,y)(john, mary)').simplify())
|
377 |
+
sees(john,mary)
|
378 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(john)(mary)').simplify())
|
379 |
+
sees(john,mary)
|
380 |
+
>>> print(read_expr(r'\x y.sees(x,y)(john)(mary)').simplify())
|
381 |
+
sees(john,mary)
|
382 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(john)').simplify())
|
383 |
+
\y.sees(john,y)
|
384 |
+
>>> print(read_expr(r'\x y.sees(x,y)(john)').simplify())
|
385 |
+
\y.sees(john,y)
|
386 |
+
>>> print(read_expr(r'(\x.\y.sees(x,y)(john))(mary)').simplify())
|
387 |
+
sees(john,mary)
|
388 |
+
>>> print(read_expr(r'(\x y.sees(x,y)(john))(mary)').simplify())
|
389 |
+
sees(john,mary)
|
390 |
+
>>> print(read_expr(r'exists x.(man(x) & (\x.exists y.walks(x,y))(x))').simplify())
|
391 |
+
exists x.(man(x) & exists y.walks(x,y))
|
392 |
+
>>> e1 = read_expr(r'exists x.(man(x) & (\x.exists y.walks(x,y))(y))').simplify()
|
393 |
+
>>> e2 = read_expr(r'exists x.(man(x) & exists z1.walks(y,z1))')
|
394 |
+
>>> e1 == e2
|
395 |
+
True
|
396 |
+
>>> print(read_expr(r'(\P Q.exists x.(P(x) & Q(x)))(\x.dog(x))').simplify())
|
397 |
+
\Q.exists x.(dog(x) & Q(x))
|
398 |
+
>>> print(read_expr(r'((\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))').simplify())
|
399 |
+
exists x.(dog(x) & bark(x))
|
400 |
+
>>> print(read_expr(r'\P.(P(x)(y))(\a b.Q(a,b))').simplify())
|
401 |
+
Q(x,y)
|
402 |
+
|
403 |
+
Replace
|
404 |
+
=======
|
405 |
+
|
406 |
+
>>> a = read_expr(r'a')
|
407 |
+
>>> x = read_expr(r'x')
|
408 |
+
>>> y = read_expr(r'y')
|
409 |
+
>>> z = read_expr(r'z')
|
410 |
+
|
411 |
+
>>> print(read_expr(r'man(x)').replace(x.variable, a, False))
|
412 |
+
man(a)
|
413 |
+
>>> print(read_expr(r'(man(x) & tall(x))').replace(x.variable, a, False))
|
414 |
+
(man(a) & tall(a))
|
415 |
+
>>> print(read_expr(r'exists x.man(x)').replace(x.variable, a, False))
|
416 |
+
exists x.man(x)
|
417 |
+
>>> print(read_expr(r'exists x.man(x)').replace(x.variable, a, True))
|
418 |
+
exists a.man(a)
|
419 |
+
>>> print(read_expr(r'exists x.give(x,y,z)').replace(y.variable, a, False))
|
420 |
+
exists x.give(x,a,z)
|
421 |
+
>>> print(read_expr(r'exists x.give(x,y,z)').replace(y.variable, a, True))
|
422 |
+
exists x.give(x,a,z)
|
423 |
+
>>> e1 = read_expr(r'exists x.give(x,y,z)').replace(y.variable, x, False)
|
424 |
+
>>> e2 = read_expr(r'exists z1.give(z1,x,z)')
|
425 |
+
>>> e1 == e2
|
426 |
+
True
|
427 |
+
>>> e1 = read_expr(r'exists x.give(x,y,z)').replace(y.variable, x, True)
|
428 |
+
>>> e2 = read_expr(r'exists z1.give(z1,x,z)')
|
429 |
+
>>> e1 == e2
|
430 |
+
True
|
431 |
+
>>> print(read_expr(r'\x y z.give(x,y,z)').replace(y.variable, a, False))
|
432 |
+
\x y z.give(x,y,z)
|
433 |
+
>>> print(read_expr(r'\x y z.give(x,y,z)').replace(y.variable, a, True))
|
434 |
+
\x a z.give(x,a,z)
|
435 |
+
>>> print(read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, a, False))
|
436 |
+
\x y.give(x,y,a)
|
437 |
+
>>> print(read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, a, True))
|
438 |
+
\x y.give(x,y,a)
|
439 |
+
>>> e1 = read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, x, False)
|
440 |
+
>>> e2 = read_expr(r'\z1.\y.give(z1,y,x)')
|
441 |
+
>>> e1 == e2
|
442 |
+
True
|
443 |
+
>>> e1 = read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, x, True)
|
444 |
+
>>> e2 = read_expr(r'\z1.\y.give(z1,y,x)')
|
445 |
+
>>> e1 == e2
|
446 |
+
True
|
447 |
+
>>> print(read_expr(r'\x.give(x,y,z)').replace(z.variable, y, False))
|
448 |
+
\x.give(x,y,y)
|
449 |
+
>>> print(read_expr(r'\x.give(x,y,z)').replace(z.variable, y, True))
|
450 |
+
\x.give(x,y,y)
|
451 |
+
|
452 |
+
>>> from nltk.sem import logic
|
453 |
+
>>> logic._counter._value = 0
|
454 |
+
>>> e1 = read_expr('e1')
|
455 |
+
>>> e2 = read_expr('e2')
|
456 |
+
>>> print(read_expr('exists e1 e2.(walk(e1) & talk(e2))').replace(e1.variable, e2, True))
|
457 |
+
exists e2 e01.(walk(e2) & talk(e01))
|
458 |
+
|
459 |
+
|
460 |
+
Variables / Free
|
461 |
+
================
|
462 |
+
|
463 |
+
>>> examples = [r'walk(john)',
|
464 |
+
... r'walk(x)',
|
465 |
+
... r'?vp(?np)',
|
466 |
+
... r'see(john,mary)',
|
467 |
+
... r'exists x.walk(x)',
|
468 |
+
... r'\x.see(john,x)',
|
469 |
+
... r'\x.see(john,x)(mary)',
|
470 |
+
... r'P(x)',
|
471 |
+
... r'\P.P(x)',
|
472 |
+
... r'aa(x,bb(y),cc(z),P(w),u)',
|
473 |
+
... r'bo(?det(?n),@x)']
|
474 |
+
>>> examples = [read_expr(e) for e in examples]
|
475 |
+
|
476 |
+
>>> for e in examples:
|
477 |
+
... print('%-25s' % e, sorted(e.free()))
|
478 |
+
walk(john) []
|
479 |
+
walk(x) [Variable('x')]
|
480 |
+
?vp(?np) []
|
481 |
+
see(john,mary) []
|
482 |
+
exists x.walk(x) []
|
483 |
+
\x.see(john,x) []
|
484 |
+
(\x.see(john,x))(mary) []
|
485 |
+
P(x) [Variable('P'), Variable('x')]
|
486 |
+
\P.P(x) [Variable('x')]
|
487 |
+
aa(x,bb(y),cc(z),P(w),u) [Variable('P'), Variable('u'), Variable('w'), Variable('x'), Variable('y'), Variable('z')]
|
488 |
+
bo(?det(?n),@x) []
|
489 |
+
|
490 |
+
>>> for e in examples:
|
491 |
+
... print('%-25s' % e, sorted(e.constants()))
|
492 |
+
walk(john) [Variable('john')]
|
493 |
+
walk(x) []
|
494 |
+
?vp(?np) [Variable('?np')]
|
495 |
+
see(john,mary) [Variable('john'), Variable('mary')]
|
496 |
+
exists x.walk(x) []
|
497 |
+
\x.see(john,x) [Variable('john')]
|
498 |
+
(\x.see(john,x))(mary) [Variable('john'), Variable('mary')]
|
499 |
+
P(x) []
|
500 |
+
\P.P(x) []
|
501 |
+
aa(x,bb(y),cc(z),P(w),u) []
|
502 |
+
bo(?det(?n),@x) [Variable('?n'), Variable('@x')]
|
503 |
+
|
504 |
+
>>> for e in examples:
|
505 |
+
... print('%-25s' % e, sorted(e.predicates()))
|
506 |
+
walk(john) [Variable('walk')]
|
507 |
+
walk(x) [Variable('walk')]
|
508 |
+
?vp(?np) [Variable('?vp')]
|
509 |
+
see(john,mary) [Variable('see')]
|
510 |
+
exists x.walk(x) [Variable('walk')]
|
511 |
+
\x.see(john,x) [Variable('see')]
|
512 |
+
(\x.see(john,x))(mary) [Variable('see')]
|
513 |
+
P(x) []
|
514 |
+
\P.P(x) []
|
515 |
+
aa(x,bb(y),cc(z),P(w),u) [Variable('aa'), Variable('bb'), Variable('cc')]
|
516 |
+
bo(?det(?n),@x) [Variable('?det'), Variable('bo')]
|
517 |
+
|
518 |
+
>>> for e in examples:
|
519 |
+
... print('%-25s' % e, sorted(e.variables()))
|
520 |
+
walk(john) []
|
521 |
+
walk(x) [Variable('x')]
|
522 |
+
?vp(?np) [Variable('?np'), Variable('?vp')]
|
523 |
+
see(john,mary) []
|
524 |
+
exists x.walk(x) []
|
525 |
+
\x.see(john,x) []
|
526 |
+
(\x.see(john,x))(mary) []
|
527 |
+
P(x) [Variable('P'), Variable('x')]
|
528 |
+
\P.P(x) [Variable('x')]
|
529 |
+
aa(x,bb(y),cc(z),P(w),u) [Variable('P'), Variable('u'), Variable('w'), Variable('x'), Variable('y'), Variable('z')]
|
530 |
+
bo(?det(?n),@x) [Variable('?det'), Variable('?n'), Variable('@x')]
|
531 |
+
|
532 |
+
|
533 |
+
|
534 |
+
`normalize`
|
535 |
+
>>> print(read_expr(r'\e083.(walk(e083, z472) & talk(e092, z938))').normalize())
|
536 |
+
\e01.(walk(e01,z3) & talk(e02,z4))
|
537 |
+
|
538 |
+
Typed Logic
|
539 |
+
+++++++++++
|
540 |
+
|
541 |
+
>>> from nltk.sem.logic import LogicParser
|
542 |
+
>>> tlp = LogicParser(True)
|
543 |
+
>>> print(tlp.parse(r'man(x)').type)
|
544 |
+
?
|
545 |
+
>>> print(tlp.parse(r'walk(angus)').type)
|
546 |
+
?
|
547 |
+
>>> print(tlp.parse(r'-man(x)').type)
|
548 |
+
t
|
549 |
+
>>> print(tlp.parse(r'(man(x) <-> tall(x))').type)
|
550 |
+
t
|
551 |
+
>>> print(tlp.parse(r'exists x.(man(x) & tall(x))').type)
|
552 |
+
t
|
553 |
+
>>> print(tlp.parse(r'\x.man(x)').type)
|
554 |
+
<e,?>
|
555 |
+
>>> print(tlp.parse(r'john').type)
|
556 |
+
e
|
557 |
+
>>> print(tlp.parse(r'\x y.sees(x,y)').type)
|
558 |
+
<e,<e,?>>
|
559 |
+
>>> print(tlp.parse(r'\x.man(x)(john)').type)
|
560 |
+
?
|
561 |
+
>>> print(tlp.parse(r'\x.\y.sees(x,y)(john)').type)
|
562 |
+
<e,?>
|
563 |
+
>>> print(tlp.parse(r'\x.\y.sees(x,y)(john)(mary)').type)
|
564 |
+
?
|
565 |
+
>>> print(tlp.parse(r'\P.\Q.exists x.(P(x) & Q(x))').type)
|
566 |
+
<<e,t>,<<e,t>,t>>
|
567 |
+
>>> print(tlp.parse(r'\x.y').type)
|
568 |
+
<?,e>
|
569 |
+
>>> print(tlp.parse(r'\P.P(x)').type)
|
570 |
+
<<e,?>,?>
|
571 |
+
|
572 |
+
>>> parsed = tlp.parse('see(john,mary)')
|
573 |
+
>>> print(parsed.type)
|
574 |
+
?
|
575 |
+
>>> print(parsed.function)
|
576 |
+
see(john)
|
577 |
+
>>> print(parsed.function.type)
|
578 |
+
<e,?>
|
579 |
+
>>> print(parsed.function.function)
|
580 |
+
see
|
581 |
+
>>> print(parsed.function.function.type)
|
582 |
+
<e,<e,?>>
|
583 |
+
|
584 |
+
>>> parsed = tlp.parse('P(x,y)')
|
585 |
+
>>> print(parsed)
|
586 |
+
P(x,y)
|
587 |
+
>>> print(parsed.type)
|
588 |
+
?
|
589 |
+
>>> print(parsed.function)
|
590 |
+
P(x)
|
591 |
+
>>> print(parsed.function.type)
|
592 |
+
<e,?>
|
593 |
+
>>> print(parsed.function.function)
|
594 |
+
P
|
595 |
+
>>> print(parsed.function.function.type)
|
596 |
+
<e,<e,?>>
|
597 |
+
|
598 |
+
>>> print(tlp.parse(r'P').type)
|
599 |
+
?
|
600 |
+
|
601 |
+
>>> print(tlp.parse(r'P', {'P': 't'}).type)
|
602 |
+
t
|
603 |
+
|
604 |
+
>>> a = tlp.parse(r'P(x)')
|
605 |
+
>>> print(a.type)
|
606 |
+
?
|
607 |
+
>>> print(a.function.type)
|
608 |
+
<e,?>
|
609 |
+
>>> print(a.argument.type)
|
610 |
+
e
|
611 |
+
|
612 |
+
>>> a = tlp.parse(r'-P(x)')
|
613 |
+
>>> print(a.type)
|
614 |
+
t
|
615 |
+
>>> print(a.term.type)
|
616 |
+
t
|
617 |
+
>>> print(a.term.function.type)
|
618 |
+
<e,t>
|
619 |
+
>>> print(a.term.argument.type)
|
620 |
+
e
|
621 |
+
|
622 |
+
>>> a = tlp.parse(r'P & Q')
|
623 |
+
>>> print(a.type)
|
624 |
+
t
|
625 |
+
>>> print(a.first.type)
|
626 |
+
t
|
627 |
+
>>> print(a.second.type)
|
628 |
+
t
|
629 |
+
|
630 |
+
>>> a = tlp.parse(r'(P(x) & Q(x))')
|
631 |
+
>>> print(a.type)
|
632 |
+
t
|
633 |
+
>>> print(a.first.type)
|
634 |
+
t
|
635 |
+
>>> print(a.first.function.type)
|
636 |
+
<e,t>
|
637 |
+
>>> print(a.first.argument.type)
|
638 |
+
e
|
639 |
+
>>> print(a.second.type)
|
640 |
+
t
|
641 |
+
>>> print(a.second.function.type)
|
642 |
+
<e,t>
|
643 |
+
>>> print(a.second.argument.type)
|
644 |
+
e
|
645 |
+
|
646 |
+
>>> a = tlp.parse(r'\x.P(x)')
|
647 |
+
>>> print(a.type)
|
648 |
+
<e,?>
|
649 |
+
>>> print(a.term.function.type)
|
650 |
+
<e,?>
|
651 |
+
>>> print(a.term.argument.type)
|
652 |
+
e
|
653 |
+
|
654 |
+
>>> a = tlp.parse(r'\P.P(x)')
|
655 |
+
>>> print(a.type)
|
656 |
+
<<e,?>,?>
|
657 |
+
>>> print(a.term.function.type)
|
658 |
+
<e,?>
|
659 |
+
>>> print(a.term.argument.type)
|
660 |
+
e
|
661 |
+
|
662 |
+
>>> a = tlp.parse(r'(\x.P(x)(john)) & Q(x)')
|
663 |
+
>>> print(a.type)
|
664 |
+
t
|
665 |
+
>>> print(a.first.type)
|
666 |
+
t
|
667 |
+
>>> print(a.first.function.type)
|
668 |
+
<e,t>
|
669 |
+
>>> print(a.first.function.term.function.type)
|
670 |
+
<e,t>
|
671 |
+
>>> print(a.first.function.term.argument.type)
|
672 |
+
e
|
673 |
+
>>> print(a.first.argument.type)
|
674 |
+
e
|
675 |
+
|
676 |
+
>>> a = tlp.parse(r'\x y.P(x,y)(john)(mary) & Q(x)')
|
677 |
+
>>> print(a.type)
|
678 |
+
t
|
679 |
+
>>> print(a.first.type)
|
680 |
+
t
|
681 |
+
>>> print(a.first.function.type)
|
682 |
+
<e,t>
|
683 |
+
>>> print(a.first.function.function.type)
|
684 |
+
<e,<e,t>>
|
685 |
+
|
686 |
+
>>> a = tlp.parse(r'--P')
|
687 |
+
>>> print(a.type)
|
688 |
+
t
|
689 |
+
>>> print(a.term.type)
|
690 |
+
t
|
691 |
+
>>> print(a.term.term.type)
|
692 |
+
t
|
693 |
+
|
694 |
+
>>> tlp.parse(r'\x y.P(x,y)').type
|
695 |
+
<e,<e,?>>
|
696 |
+
>>> tlp.parse(r'\x y.P(x,y)', {'P': '<e,<e,t>>'}).type
|
697 |
+
<e,<e,t>>
|
698 |
+
|
699 |
+
>>> a = tlp.parse(r'\P y.P(john,y)(\x y.see(x,y))')
|
700 |
+
>>> a.type
|
701 |
+
<e,?>
|
702 |
+
>>> a.function.type
|
703 |
+
<<e,<e,?>>,<e,?>>
|
704 |
+
>>> a.function.term.term.function.function.type
|
705 |
+
<e,<e,?>>
|
706 |
+
>>> a.argument.type
|
707 |
+
<e,<e,?>>
|
708 |
+
|
709 |
+
>>> a = tlp.parse(r'exists c f.(father(c) = f)')
|
710 |
+
>>> a.type
|
711 |
+
t
|
712 |
+
>>> a.term.term.type
|
713 |
+
t
|
714 |
+
>>> a.term.term.first.type
|
715 |
+
e
|
716 |
+
>>> a.term.term.first.function.type
|
717 |
+
<e,e>
|
718 |
+
>>> a.term.term.second.type
|
719 |
+
e
|
720 |
+
|
721 |
+
typecheck()
|
722 |
+
|
723 |
+
>>> a = tlp.parse('P(x)')
|
724 |
+
>>> b = tlp.parse('Q(x)')
|
725 |
+
>>> a.type
|
726 |
+
?
|
727 |
+
>>> c = a & b
|
728 |
+
>>> c.first.type
|
729 |
+
?
|
730 |
+
>>> c.typecheck()
|
731 |
+
{...}
|
732 |
+
>>> c.first.type
|
733 |
+
t
|
734 |
+
|
735 |
+
>>> a = tlp.parse('P(x)')
|
736 |
+
>>> b = tlp.parse('P(x) & Q(x)')
|
737 |
+
>>> a.type
|
738 |
+
?
|
739 |
+
>>> typecheck([a,b])
|
740 |
+
{...}
|
741 |
+
>>> a.type
|
742 |
+
t
|
743 |
+
|
744 |
+
>>> e = tlp.parse(r'man(x)')
|
745 |
+
>>> print(dict((k,str(v)) for k,v in e.typecheck().items()) == {'x': 'e', 'man': '<e,?>'})
|
746 |
+
True
|
747 |
+
>>> sig = {'man': '<e, t>'}
|
748 |
+
>>> e = tlp.parse(r'man(x)', sig)
|
749 |
+
>>> print(e.function.type)
|
750 |
+
<e,t>
|
751 |
+
>>> print(dict((k,str(v)) for k,v in e.typecheck().items()) == {'x': 'e', 'man': '<e,t>'})
|
752 |
+
True
|
753 |
+
>>> print(e.function.type)
|
754 |
+
<e,t>
|
755 |
+
>>> print(dict((k,str(v)) for k,v in e.typecheck(sig).items()) == {'x': 'e', 'man': '<e,t>'})
|
756 |
+
True
|
757 |
+
|
758 |
+
findtype()
|
759 |
+
|
760 |
+
>>> print(tlp.parse(r'man(x)').findtype(Variable('man')))
|
761 |
+
<e,?>
|
762 |
+
>>> print(tlp.parse(r'see(x,y)').findtype(Variable('see')))
|
763 |
+
<e,<e,?>>
|
764 |
+
>>> print(tlp.parse(r'P(Q(R(x)))').findtype(Variable('Q')))
|
765 |
+
?
|
766 |
+
|
767 |
+
reading types from strings
|
768 |
+
|
769 |
+
>>> Type.fromstring('e')
|
770 |
+
e
|
771 |
+
>>> Type.fromstring('<e,t>')
|
772 |
+
<e,t>
|
773 |
+
>>> Type.fromstring('<<e,t>,<e,t>>')
|
774 |
+
<<e,t>,<e,t>>
|
775 |
+
>>> Type.fromstring('<<e,?>,?>')
|
776 |
+
<<e,?>,?>
|
777 |
+
|
778 |
+
alternative type format
|
779 |
+
|
780 |
+
>>> Type.fromstring('e').str()
|
781 |
+
'IND'
|
782 |
+
>>> Type.fromstring('<e,?>').str()
|
783 |
+
'(IND -> ANY)'
|
784 |
+
>>> Type.fromstring('<<e,t>,t>').str()
|
785 |
+
'((IND -> BOOL) -> BOOL)'
|
786 |
+
|
787 |
+
Type.__eq__()
|
788 |
+
|
789 |
+
>>> from nltk.sem.logic import *
|
790 |
+
|
791 |
+
>>> e = ENTITY_TYPE
|
792 |
+
>>> t = TRUTH_TYPE
|
793 |
+
>>> a = ANY_TYPE
|
794 |
+
>>> et = ComplexType(e,t)
|
795 |
+
>>> eet = ComplexType(e,ComplexType(e,t))
|
796 |
+
>>> at = ComplexType(a,t)
|
797 |
+
>>> ea = ComplexType(e,a)
|
798 |
+
>>> aa = ComplexType(a,a)
|
799 |
+
|
800 |
+
>>> e == e
|
801 |
+
True
|
802 |
+
>>> t == t
|
803 |
+
True
|
804 |
+
>>> e == t
|
805 |
+
False
|
806 |
+
>>> a == t
|
807 |
+
False
|
808 |
+
>>> t == a
|
809 |
+
False
|
810 |
+
>>> a == a
|
811 |
+
True
|
812 |
+
>>> et == et
|
813 |
+
True
|
814 |
+
>>> a == et
|
815 |
+
False
|
816 |
+
>>> et == a
|
817 |
+
False
|
818 |
+
>>> a == ComplexType(a,aa)
|
819 |
+
True
|
820 |
+
>>> ComplexType(a,aa) == a
|
821 |
+
True
|
822 |
+
|
823 |
+
matches()
|
824 |
+
|
825 |
+
>>> e.matches(t)
|
826 |
+
False
|
827 |
+
>>> a.matches(t)
|
828 |
+
True
|
829 |
+
>>> t.matches(a)
|
830 |
+
True
|
831 |
+
>>> a.matches(et)
|
832 |
+
True
|
833 |
+
>>> et.matches(a)
|
834 |
+
True
|
835 |
+
>>> ea.matches(eet)
|
836 |
+
True
|
837 |
+
>>> eet.matches(ea)
|
838 |
+
True
|
839 |
+
>>> aa.matches(et)
|
840 |
+
True
|
841 |
+
>>> aa.matches(t)
|
842 |
+
True
|
843 |
+
|
844 |
+
Type error during parsing
|
845 |
+
=========================
|
846 |
+
|
847 |
+
>>> try: print(tlp.parse(r'exists x y.(P(x) & P(x,y))'))
|
848 |
+
... except InconsistentTypeHierarchyException as e: print(e)
|
849 |
+
The variable 'P' was found in multiple places with different types.
|
850 |
+
>>> try: tlp.parse(r'\x y.see(x,y)(\x.man(x))')
|
851 |
+
... except TypeException as e: print(e)
|
852 |
+
The function '\x y.see(x,y)' is of type '<e,<e,?>>' and cannot be applied to '\x.man(x)' of type '<e,?>'. Its argument must match type 'e'.
|
853 |
+
>>> try: tlp.parse(r'\P x y.-P(x,y)(\x.-man(x))')
|
854 |
+
... except TypeException as e: print(e)
|
855 |
+
The function '\P x y.-P(x,y)' is of type '<<e,<e,t>>,<e,<e,t>>>' and cannot be applied to '\x.-man(x)' of type '<e,t>'. Its argument must match type '<e,<e,t>>'.
|
856 |
+
|
857 |
+
>>> a = tlp.parse(r'-talk(x)')
|
858 |
+
>>> signature = a.typecheck()
|
859 |
+
>>> try: print(tlp.parse(r'-talk(x,y)', signature))
|
860 |
+
... except InconsistentTypeHierarchyException as e: print(e)
|
861 |
+
The variable 'talk' was found in multiple places with different types.
|
862 |
+
|
863 |
+
>>> a = tlp.parse(r'-P(x)')
|
864 |
+
>>> b = tlp.parse(r'-P(x,y)')
|
865 |
+
>>> a.typecheck()
|
866 |
+
{...}
|
867 |
+
>>> b.typecheck()
|
868 |
+
{...}
|
869 |
+
>>> try: typecheck([a,b])
|
870 |
+
... except InconsistentTypeHierarchyException as e: print(e)
|
871 |
+
The variable 'P' was found in multiple places with different types.
|
872 |
+
|
873 |
+
>>> a = tlp.parse(r'P(x)')
|
874 |
+
>>> b = tlp.parse(r'P(x,y)')
|
875 |
+
>>> signature = {'P': '<e,t>'}
|
876 |
+
>>> a.typecheck(signature)
|
877 |
+
{...}
|
878 |
+
>>> try: typecheck([a,b], signature)
|
879 |
+
... except InconsistentTypeHierarchyException as e: print(e)
|
880 |
+
The variable 'P' was found in multiple places with different types.
|
881 |
+
|
882 |
+
Parse errors
|
883 |
+
============
|
884 |
+
|
885 |
+
>>> try: read_expr(r'')
|
886 |
+
... except LogicalExpressionException as e: print(e)
|
887 |
+
End of input found. Expression expected.
|
888 |
+
<BLANKLINE>
|
889 |
+
^
|
890 |
+
>>> try: read_expr(r'(')
|
891 |
+
... except LogicalExpressionException as e: print(e)
|
892 |
+
End of input found. Expression expected.
|
893 |
+
(
|
894 |
+
^
|
895 |
+
>>> try: read_expr(r')')
|
896 |
+
... except LogicalExpressionException as e: print(e)
|
897 |
+
Unexpected token: ')'. Expression expected.
|
898 |
+
)
|
899 |
+
^
|
900 |
+
>>> try: read_expr(r'()')
|
901 |
+
... except LogicalExpressionException as e: print(e)
|
902 |
+
Unexpected token: ')'. Expression expected.
|
903 |
+
()
|
904 |
+
^
|
905 |
+
>>> try: read_expr(r'(P(x) & Q(x)')
|
906 |
+
... except LogicalExpressionException as e: print(e)
|
907 |
+
End of input found. Expected token ')'.
|
908 |
+
(P(x) & Q(x)
|
909 |
+
^
|
910 |
+
>>> try: read_expr(r'(P(x) &')
|
911 |
+
... except LogicalExpressionException as e: print(e)
|
912 |
+
End of input found. Expression expected.
|
913 |
+
(P(x) &
|
914 |
+
^
|
915 |
+
>>> try: read_expr(r'(P(x) | )')
|
916 |
+
... except LogicalExpressionException as e: print(e)
|
917 |
+
Unexpected token: ')'. Expression expected.
|
918 |
+
(P(x) | )
|
919 |
+
^
|
920 |
+
>>> try: read_expr(r'P(x) ->')
|
921 |
+
... except LogicalExpressionException as e: print(e)
|
922 |
+
End of input found. Expression expected.
|
923 |
+
P(x) ->
|
924 |
+
^
|
925 |
+
>>> try: read_expr(r'P(x')
|
926 |
+
... except LogicalExpressionException as e: print(e)
|
927 |
+
End of input found. Expected token ')'.
|
928 |
+
P(x
|
929 |
+
^
|
930 |
+
>>> try: read_expr(r'P(x,')
|
931 |
+
... except LogicalExpressionException as e: print(e)
|
932 |
+
End of input found. Expression expected.
|
933 |
+
P(x,
|
934 |
+
^
|
935 |
+
>>> try: read_expr(r'P(x,)')
|
936 |
+
... except LogicalExpressionException as e: print(e)
|
937 |
+
Unexpected token: ')'. Expression expected.
|
938 |
+
P(x,)
|
939 |
+
^
|
940 |
+
>>> try: read_expr(r'exists')
|
941 |
+
... except LogicalExpressionException as e: print(e)
|
942 |
+
End of input found. Variable and Expression expected following quantifier 'exists'.
|
943 |
+
exists
|
944 |
+
^
|
945 |
+
>>> try: read_expr(r'exists x')
|
946 |
+
... except LogicalExpressionException as e: print(e)
|
947 |
+
End of input found. Expression expected.
|
948 |
+
exists x
|
949 |
+
^
|
950 |
+
>>> try: read_expr(r'exists x.')
|
951 |
+
... except LogicalExpressionException as e: print(e)
|
952 |
+
End of input found. Expression expected.
|
953 |
+
exists x.
|
954 |
+
^
|
955 |
+
>>> try: read_expr(r'\ ')
|
956 |
+
... except LogicalExpressionException as e: print(e)
|
957 |
+
End of input found. Variable and Expression expected following lambda operator.
|
958 |
+
\
|
959 |
+
^
|
960 |
+
>>> try: read_expr(r'\ x')
|
961 |
+
... except LogicalExpressionException as e: print(e)
|
962 |
+
End of input found. Expression expected.
|
963 |
+
\ x
|
964 |
+
^
|
965 |
+
>>> try: read_expr(r'\ x y')
|
966 |
+
... except LogicalExpressionException as e: print(e)
|
967 |
+
End of input found. Expression expected.
|
968 |
+
\ x y
|
969 |
+
^
|
970 |
+
>>> try: read_expr(r'\ x.')
|
971 |
+
... except LogicalExpressionException as e: print(e)
|
972 |
+
End of input found. Expression expected.
|
973 |
+
\ x.
|
974 |
+
^
|
975 |
+
>>> try: read_expr(r'P(x)Q(x)')
|
976 |
+
... except LogicalExpressionException as e: print(e)
|
977 |
+
Unexpected token: 'Q'.
|
978 |
+
P(x)Q(x)
|
979 |
+
^
|
980 |
+
>>> try: read_expr(r'(P(x)Q(x)')
|
981 |
+
... except LogicalExpressionException as e: print(e)
|
982 |
+
Unexpected token: 'Q'. Expected token ')'.
|
983 |
+
(P(x)Q(x)
|
984 |
+
^
|
985 |
+
>>> try: read_expr(r'exists x y')
|
986 |
+
... except LogicalExpressionException as e: print(e)
|
987 |
+
End of input found. Expression expected.
|
988 |
+
exists x y
|
989 |
+
^
|
990 |
+
>>> try: read_expr(r'exists x y.')
|
991 |
+
... except LogicalExpressionException as e: print(e)
|
992 |
+
End of input found. Expression expected.
|
993 |
+
exists x y.
|
994 |
+
^
|
995 |
+
>>> try: read_expr(r'exists x -> y')
|
996 |
+
... except LogicalExpressionException as e: print(e)
|
997 |
+
Unexpected token: '->'. Expression expected.
|
998 |
+
exists x -> y
|
999 |
+
^
|
1000 |
+
|
1001 |
+
|
1002 |
+
>>> try: read_expr(r'A -> ((P(x) & Q(x)) -> Z')
|
1003 |
+
... except LogicalExpressionException as e: print(e)
|
1004 |
+
End of input found. Expected token ')'.
|
1005 |
+
A -> ((P(x) & Q(x)) -> Z
|
1006 |
+
^
|
1007 |
+
>>> try: read_expr(r'A -> ((P(x) &) -> Z')
|
1008 |
+
... except LogicalExpressionException as e: print(e)
|
1009 |
+
Unexpected token: ')'. Expression expected.
|
1010 |
+
A -> ((P(x) &) -> Z
|
1011 |
+
^
|
1012 |
+
>>> try: read_expr(r'A -> ((P(x) | )) -> Z')
|
1013 |
+
... except LogicalExpressionException as e: print(e)
|
1014 |
+
Unexpected token: ')'. Expression expected.
|
1015 |
+
A -> ((P(x) | )) -> Z
|
1016 |
+
^
|
1017 |
+
>>> try: read_expr(r'A -> (P(x) ->) -> Z')
|
1018 |
+
... except LogicalExpressionException as e: print(e)
|
1019 |
+
Unexpected token: ')'. Expression expected.
|
1020 |
+
A -> (P(x) ->) -> Z
|
1021 |
+
^
|
1022 |
+
>>> try: read_expr(r'A -> (P(x) -> Z')
|
1023 |
+
... except LogicalExpressionException as e: print(e)
|
1024 |
+
End of input found. Expected token ')'.
|
1025 |
+
A -> (P(x) -> Z
|
1026 |
+
^
|
1027 |
+
>>> try: read_expr(r'A -> (P(x,) -> Z')
|
1028 |
+
... except LogicalExpressionException as e: print(e)
|
1029 |
+
Unexpected token: ')'. Expression expected.
|
1030 |
+
A -> (P(x,) -> Z
|
1031 |
+
^
|
1032 |
+
>>> try: read_expr(r'A -> (P(x,)) -> Z')
|
1033 |
+
... except LogicalExpressionException as e: print(e)
|
1034 |
+
Unexpected token: ')'. Expression expected.
|
1035 |
+
A -> (P(x,)) -> Z
|
1036 |
+
^
|
1037 |
+
>>> try: read_expr(r'A -> (exists) -> Z')
|
1038 |
+
... except LogicalExpressionException as e: print(e)
|
1039 |
+
')' is an illegal variable name. Constants may not be quantified.
|
1040 |
+
A -> (exists) -> Z
|
1041 |
+
^
|
1042 |
+
>>> try: read_expr(r'A -> (exists x) -> Z')
|
1043 |
+
... except LogicalExpressionException as e: print(e)
|
1044 |
+
Unexpected token: ')'. Expression expected.
|
1045 |
+
A -> (exists x) -> Z
|
1046 |
+
^
|
1047 |
+
>>> try: read_expr(r'A -> (exists x.) -> Z')
|
1048 |
+
... except LogicalExpressionException as e: print(e)
|
1049 |
+
Unexpected token: ')'. Expression expected.
|
1050 |
+
A -> (exists x.) -> Z
|
1051 |
+
^
|
1052 |
+
>>> try: read_expr(r'A -> (\ ) -> Z')
|
1053 |
+
... except LogicalExpressionException as e: print(e)
|
1054 |
+
')' is an illegal variable name. Constants may not be abstracted.
|
1055 |
+
A -> (\ ) -> Z
|
1056 |
+
^
|
1057 |
+
>>> try: read_expr(r'A -> (\ x) -> Z')
|
1058 |
+
... except LogicalExpressionException as e: print(e)
|
1059 |
+
Unexpected token: ')'. Expression expected.
|
1060 |
+
A -> (\ x) -> Z
|
1061 |
+
^
|
1062 |
+
>>> try: read_expr(r'A -> (\ x y) -> Z')
|
1063 |
+
... except LogicalExpressionException as e: print(e)
|
1064 |
+
Unexpected token: ')'. Expression expected.
|
1065 |
+
A -> (\ x y) -> Z
|
1066 |
+
^
|
1067 |
+
>>> try: read_expr(r'A -> (\ x.) -> Z')
|
1068 |
+
... except LogicalExpressionException as e: print(e)
|
1069 |
+
Unexpected token: ')'. Expression expected.
|
1070 |
+
A -> (\ x.) -> Z
|
1071 |
+
^
|
1072 |
+
>>> try: read_expr(r'A -> (P(x)Q(x)) -> Z')
|
1073 |
+
... except LogicalExpressionException as e: print(e)
|
1074 |
+
Unexpected token: 'Q'. Expected token ')'.
|
1075 |
+
A -> (P(x)Q(x)) -> Z
|
1076 |
+
^
|
1077 |
+
>>> try: read_expr(r'A -> ((P(x)Q(x)) -> Z')
|
1078 |
+
... except LogicalExpressionException as e: print(e)
|
1079 |
+
Unexpected token: 'Q'. Expected token ')'.
|
1080 |
+
A -> ((P(x)Q(x)) -> Z
|
1081 |
+
^
|
1082 |
+
>>> try: read_expr(r'A -> (all x y) -> Z')
|
1083 |
+
... except LogicalExpressionException as e: print(e)
|
1084 |
+
Unexpected token: ')'. Expression expected.
|
1085 |
+
A -> (all x y) -> Z
|
1086 |
+
^
|
1087 |
+
>>> try: read_expr(r'A -> (exists x y.) -> Z')
|
1088 |
+
... except LogicalExpressionException as e: print(e)
|
1089 |
+
Unexpected token: ')'. Expression expected.
|
1090 |
+
A -> (exists x y.) -> Z
|
1091 |
+
^
|
1092 |
+
>>> try: read_expr(r'A -> (exists x -> y) -> Z')
|
1093 |
+
... except LogicalExpressionException as e: print(e)
|
1094 |
+
Unexpected token: '->'. Expression expected.
|
1095 |
+
A -> (exists x -> y) -> Z
|
1096 |
+
^
|
lib/python3.10/site-packages/nltk/test/metrics.doctest
ADDED
@@ -0,0 +1,321 @@
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
=======
|
5 |
+
Metrics
|
6 |
+
=======
|
7 |
+
|
8 |
+
-----
|
9 |
+
Setup
|
10 |
+
-----
|
11 |
+
|
12 |
+
>>> import pytest
|
13 |
+
>>> _ = pytest.importorskip("numpy")
|
14 |
+
|
15 |
+
|
16 |
+
The `nltk.metrics` package provides a variety of *evaluation measures*
|
17 |
+
which can be used for a wide range of NLP tasks.
|
18 |
+
|
19 |
+
>>> from nltk.metrics import *
|
20 |
+
|
21 |
+
------------------
|
22 |
+
Standard IR Scores
|
23 |
+
------------------
|
24 |
+
|
25 |
+
We can use standard scores from information retrieval to test the
|
26 |
+
performance of taggers, chunkers, etc.
|
27 |
+
|
28 |
+
>>> reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
|
29 |
+
>>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
|
30 |
+
>>> print(accuracy(reference, test))
|
31 |
+
0.8
|
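``accuracy`` is simply the proportion of positions at which the two sequences
agree; an added cross-check:

>>> sum(1 for r, t in zip(reference, test) if r == t) / len(test)
0.8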
32 |
+
|
33 |
+
|
34 |
+
The following measures apply to sets:
|
35 |
+
|
36 |
+
>>> reference_set = set(reference)
|
37 |
+
>>> test_set = set(test)
|
38 |
+
>>> precision(reference_set, test_set)
|
39 |
+
1.0
|
40 |
+
>>> print(recall(reference_set, test_set))
|
41 |
+
0.8
|
42 |
+
>>> print(f_measure(reference_set, test_set))
|
43 |
+
0.88888888888...
|
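With the default ``alpha = 0.5``, the f-measure is the harmonic mean of the
precision (1.0) and recall (0.8) above; an added sanity check:

>>> abs(f_measure(reference_set, test_set) - 2 * 1.0 * 0.8 / 1.8) < 1e-12
True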
44 |
+
|
45 |
+
Measuring the likelihood of the data, given probability distributions:
|
46 |
+
|
47 |
+
>>> from nltk import FreqDist, MLEProbDist
|
48 |
+
>>> pdist1 = MLEProbDist(FreqDist("aldjfalskfjaldsf"))
|
49 |
+
>>> pdist2 = MLEProbDist(FreqDist("aldjfalssjjlldss"))
|
50 |
+
>>> print(log_likelihood(['a', 'd'], [pdist1, pdist2]))
|
51 |
+
-2.7075187496...
|
52 |
+
|
53 |
+
|
54 |
+
----------------
|
55 |
+
Distance Metrics
|
56 |
+
----------------
|
57 |
+
|
58 |
+
String edit distance (Levenshtein):
|
59 |
+
|
60 |
+
>>> edit_distance("rain", "shine")
|
61 |
+
3
|
62 |
+
>>> edit_distance_align("shine", "shine")
|
63 |
+
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]
|
64 |
+
>>> edit_distance_align("rain", "brainy")
|
65 |
+
[(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (4, 6)]
|
66 |
+
>>> edit_distance_align("", "brainy")
|
67 |
+
[(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6)]
|
68 |
+
>>> edit_distance_align("", "")
|
69 |
+
[(0, 0)]
|
70 |
+
|
71 |
+
Other distance measures:
|
72 |
+
|
73 |
+
>>> s1 = set([1,2,3,4])
|
74 |
+
>>> s2 = set([3,4,5])
|
75 |
+
>>> binary_distance(s1, s2)
|
76 |
+
1.0
|
77 |
+
>>> print(jaccard_distance(s1, s2))
|
78 |
+
0.6
|
79 |
+
>>> print(masi_distance(s1, s2))
|
80 |
+
0.868
|
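Jaccard distance is one minus the ratio of intersection to union; an added
check against the set definition:

>>> 1 - len(s1 & s2) / len(s1 | s2)
0.6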
81 |
+
|
82 |
+
----------------------
|
83 |
+
Miscellaneous Measures
|
84 |
+
----------------------
|
85 |
+
|
86 |
+
Rank Correlation works with two dictionaries mapping keys to ranks.
|
87 |
+
The dictionaries should have the same set of keys.
|
88 |
+
|
89 |
+
>>> spearman_correlation({'e':1, 't':2, 'a':3}, {'e':1, 'a':2, 't':3})
|
90 |
+
0.5
|
91 |
+
|
92 |
+
Windowdiff uses a sliding window in comparing two segmentations of the same input (e.g. tokenizations, chunkings).
|
93 |
+
Segmentations are represented using strings of zeros and ones.
|
94 |
+
|
95 |
+
>>> s1 = "000100000010"
|
96 |
+
>>> s2 = "000010000100"
|
97 |
+
>>> s3 = "100000010000"
|
98 |
+
>>> s4 = "000000000000"
|
99 |
+
>>> s5 = "111111111111"
|
100 |
+
>>> windowdiff(s1, s1, 3)
|
101 |
+
0.0
|
102 |
+
>>> abs(windowdiff(s1, s2, 3) - 0.3) < 1e-6 # windowdiff(s1, s2, 3) == 0.3
|
103 |
+
True
|
104 |
+
>>> abs(windowdiff(s2, s3, 3) - 0.8) < 1e-6 # windowdiff(s2, s3, 3) == 0.8
|
105 |
+
True
|
106 |
+
>>> windowdiff(s1, s4, 3)
|
107 |
+
0.5
|
108 |
+
>>> windowdiff(s1, s5, 3)
|
109 |
+
1.0
|
110 |
+
|
111 |
+
----------------
|
112 |
+
Confusion Matrix
|
113 |
+
----------------
|
114 |
+
|
115 |
+
>>> reference = 'This is the reference data. Testing 123. aoaeoeoe'
|
116 |
+
>>> test = 'Thos iz_the rifirenci data. Testeng 123. aoaeoeoe'
|
117 |
+
>>> print(ConfusionMatrix(reference, test))
|
118 |
+
| . 1 2 3 T _ a c d e f g h i n o r s t z |
|
119 |
+
--+-------------------------------------------+
|
120 |
+
|<8>. . . . . 1 . . . . . . . . . . . . . . |
|
121 |
+
. | .<2>. . . . . . . . . . . . . . . . . . . |
|
122 |
+
1 | . .<1>. . . . . . . . . . . . . . . . . . |
|
123 |
+
2 | . . .<1>. . . . . . . . . . . . . . . . . |
|
124 |
+
3 | . . . .<1>. . . . . . . . . . . . . . . . |
|
125 |
+
T | . . . . .<2>. . . . . . . . . . . . . . . |
|
126 |
+
_ | . . . . . .<.>. . . . . . . . . . . . . . |
|
127 |
+
a | . . . . . . .<4>. . . . . . . . . . . . . |
|
128 |
+
c | . . . . . . . .<1>. . . . . . . . . . . . |
|
129 |
+
d | . . . . . . . . .<1>. . . . . . . . . . . |
|
130 |
+
e | . . . . . . . . . .<6>. . . 3 . . . . . . |
|
131 |
+
f | . . . . . . . . . . .<1>. . . . . . . . . |
|
132 |
+
g | . . . . . . . . . . . .<1>. . . . . . . . |
|
133 |
+
h | . . . . . . . . . . . . .<2>. . . . . . . |
|
134 |
+
i | . . . . . . . . . . 1 . . .<1>. 1 . . . . |
|
135 |
+
n | . . . . . . . . . . . . . . .<2>. . . . . |
|
136 |
+
o | . . . . . . . . . . . . . . . .<3>. . . . |
|
137 |
+
r | . . . . . . . . . . . . . . . . .<2>. . . |
|
138 |
+
s | . . . . . . . . . . . . . . . . . .<2>. 1 |
|
139 |
+
t | . . . . . . . . . . . . . . . . . . .<3>. |
|
140 |
+
z | . . . . . . . . . . . . . . . . . . . .<.>|
|
141 |
+
--+-------------------------------------------+
|
142 |
+
(row = reference; col = test)
|
143 |
+
<BLANKLINE>
|
144 |
+
|
145 |
+
>>> cm = ConfusionMatrix(reference, test)
|
146 |
+
>>> print(cm.pretty_format(sort_by_count=True))
|
147 |
+
| e a i o s t . T h n r 1 2 3 c d f g _ z |
|
148 |
+
--+-------------------------------------------+
|
149 |
+
|<8>. . . . . . . . . . . . . . . . . . 1 . |
|
150 |
+
e | .<6>. 3 . . . . . . . . . . . . . . . . . |
|
151 |
+
a | . .<4>. . . . . . . . . . . . . . . . . . |
|
152 |
+
i | . 1 .<1>1 . . . . . . . . . . . . . . . . |
|
153 |
+
o | . . . .<3>. . . . . . . . . . . . . . . . |
|
154 |
+
s | . . . . .<2>. . . . . . . . . . . . . . 1 |
|
155 |
+
t | . . . . . .<3>. . . . . . . . . . . . . . |
|
156 |
+
. | . . . . . . .<2>. . . . . . . . . . . . . |
|
157 |
+
T | . . . . . . . .<2>. . . . . . . . . . . . |
|
158 |
+
h | . . . . . . . . .<2>. . . . . . . . . . . |
|
159 |
+
n | . . . . . . . . . .<2>. . . . . . . . . . |
|
160 |
+
r | . . . . . . . . . . .<2>. . . . . . . . . |
|
161 |
+
1 | . . . . . . . . . . . .<1>. . . . . . . . |
|
162 |
+
2 | . . . . . . . . . . . . .<1>. . . . . . . |
|
163 |
+
3 | . . . . . . . . . . . . . .<1>. . . . . . |
|
164 |
+
c | . . . . . . . . . . . . . . .<1>. . . . . |
|
165 |
+
d | . . . . . . . . . . . . . . . .<1>. . . . |
|
166 |
+
f | . . . . . . . . . . . . . . . . .<1>. . . |
|
167 |
+
g | . . . . . . . . . . . . . . . . . .<1>. . |
|
168 |
+
_ | . . . . . . . . . . . . . . . . . . .<.>. |
|
169 |
+
z | . . . . . . . . . . . . . . . . . . . .<.>|
|
170 |
+
--+-------------------------------------------+
|
171 |
+
(row = reference; col = test)
|
172 |
+
<BLANKLINE>
|
173 |
+
|
174 |
+
>>> print(cm.pretty_format(sort_by_count=True, truncate=10))
|
175 |
+
| e a i o s t . T h |
|
176 |
+
--+---------------------+
|
177 |
+
|<8>. . . . . . . . . |
|
178 |
+
e | .<6>. 3 . . . . . . |
|
179 |
+
a | . .<4>. . . . . . . |
|
180 |
+
i | . 1 .<1>1 . . . . . |
|
181 |
+
o | . . . .<3>. . . . . |
|
182 |
+
s | . . . . .<2>. . . . |
|
183 |
+
t | . . . . . .<3>. . . |
|
184 |
+
. | . . . . . . .<2>. . |
|
185 |
+
T | . . . . . . . .<2>. |
|
186 |
+
h | . . . . . . . . .<2>|
|
187 |
+
--+---------------------+
|
188 |
+
(row = reference; col = test)
|
189 |
+
<BLANKLINE>
|
190 |
+
|
191 |
+
>>> print(cm.pretty_format(sort_by_count=True, truncate=10, values_in_chart=False))
|
192 |
+
| 1 |
|
193 |
+
| 1 2 3 4 5 6 7 8 9 0 |
|
194 |
+
---+---------------------+
|
195 |
+
1 |<8>. . . . . . . . . |
|
196 |
+
2 | .<6>. 3 . . . . . . |
|
197 |
+
3 | . .<4>. . . . . . . |
|
198 |
+
4 | . 1 .<1>1 . . . . . |
|
199 |
+
5 | . . . .<3>. . . . . |
|
200 |
+
6 | . . . . .<2>. . . . |
|
201 |
+
7 | . . . . . .<3>. . . |
|
202 |
+
8 | . . . . . . .<2>. . |
|
203 |
+
9 | . . . . . . . .<2>. |
|
204 |
+
10 | . . . . . . . . .<2>|
|
205 |
+
---+---------------------+
|
206 |
+
(row = reference; col = test)
|
207 |
+
Value key:
|
208 |
+
1:
|
209 |
+
2: e
|
210 |
+
3: a
|
211 |
+
4: i
|
212 |
+
5: o
|
213 |
+
6: s
|
214 |
+
7: t
|
215 |
+
8: .
|
216 |
+
9: T
|
217 |
+
10: h
|
218 |
+
<BLANKLINE>
|
219 |
+
|
220 |
+
For "e", the number of true positives should be 6, while the number of false negatives is 3.
|
221 |
+
So, the recall ought to be 6 / (6 + 3):
|
222 |
+
|
223 |
+
>>> cm.recall("e") # doctest: +ELLIPSIS
|
224 |
+
0.666666...
|
225 |
+
|
226 |
+
For "e", the false positive is just 1, so the precision should be 6 / (6 + 1):
|
227 |
+
|
228 |
+
>>> cm.precision("e") # doctest: +ELLIPSIS
|
229 |
+
0.857142...
|
230 |
+
|
231 |
+
The f-measure with default value of ``alpha = 0.5`` should then be:
|
232 |
+
|
233 |
+
* *1/(alpha/p + (1-alpha)/r) =*
|
234 |
+
* *1/(0.5/p + 0.5/r) =*
|
235 |
+
* *2pr / (p + r) =*
|
236 |
+
* *2 * 0.857142... * 0.666666... / (0.857142... + 0.666666...) =*
|
237 |
+
* *0.749999...*
|
238 |
+
|
239 |
+
>>> cm.f_measure("e") # doctest: +ELLIPSIS
|
240 |
+
0.749999...
|
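Equivalently, the matrix's own precision and recall satisfy the harmonic-mean
identity derived above; an added cross-check:

>>> p, r = cm.precision("e"), cm.recall("e")
>>> abs(cm.f_measure("e") - 2 * p * r / (p + r)) < 1e-12
True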
241 |
+
|
242 |
+
--------------------
|
243 |
+
Association measures
|
244 |
+
--------------------
|
245 |
+
|
246 |
+
These measures are useful to determine whether the co-occurrence of two random
|
247 |
+
events is meaningful. They are used, for instance, to distinguish collocations
|
248 |
+
from other pairs of adjacent words.
|
249 |
+
|
250 |
+
Here are some examples of bigram association calculations from Manning and
|
251 |
+
Schütze's SNLP, 2nd Ed., chapter 5.
|
252 |
+
|
253 |
+
>>> n_new_companies, n_new, n_companies, N = 8, 15828, 4675, 14307668
|
254 |
+
>>> bam = BigramAssocMeasures
|
255 |
+
>>> bam.raw_freq(20, (42, 20), N) == 20. / N
|
256 |
+
True
|
257 |
+
>>> bam.student_t(n_new_companies, (n_new, n_companies), N)
|
258 |
+
0.999...
|
259 |
+
>>> bam.chi_sq(n_new_companies, (n_new, n_companies), N)
|
260 |
+
1.54...
|
261 |
+
>>> bam.likelihood_ratio(150, (12593, 932), N)
|
262 |
+
1291...
|
263 |
+
|
264 |
+
For other association measures, we check that the expected relative ordering holds:
|
265 |
+
|
266 |
+
>>> bam.mi_like(20, (42, 20), N) > bam.mi_like(20, (41, 27), N)
|
267 |
+
True
|
268 |
+
>>> bam.pmi(20, (42, 20), N) > bam.pmi(20, (41, 27), N)
|
269 |
+
True
|
270 |
+
>>> bam.phi_sq(20, (42, 20), N) > bam.phi_sq(20, (41, 27), N)
|
271 |
+
True
|
272 |
+
>>> bam.poisson_stirling(20, (42, 20), N) > bam.poisson_stirling(20, (41, 27), N)
|
273 |
+
True
|
274 |
+
>>> bam.jaccard(20, (42, 20), N) > bam.jaccard(20, (41, 27), N)
|
275 |
+
True
|
276 |
+
>>> bam.dice(20, (42, 20), N) > bam.dice(20, (41, 27), N)
|
277 |
+
True
|
278 |
+
>>> bam.fisher(20, (42, 20), N) > bam.fisher(20, (41, 27), N) # doctest: +SKIP
|
279 |
+
False
|
280 |
+
|
281 |
+
For trigrams, we have to provide more count information:
|
282 |
+
|
283 |
+
>>> n_w1_w2_w3 = 20
|
284 |
+
>>> n_w1_w2, n_w1_w3, n_w2_w3 = 35, 60, 40
|
285 |
+
>>> pair_counts = (n_w1_w2, n_w1_w3, n_w2_w3)
|
286 |
+
>>> n_w1, n_w2, n_w3 = 100, 200, 300
|
287 |
+
>>> uni_counts = (n_w1, n_w2, n_w3)
|
288 |
+
>>> N = 14307668
|
289 |
+
>>> tam = TrigramAssocMeasures
|
290 |
+
>>> tam.raw_freq(n_w1_w2_w3, pair_counts, uni_counts, N) == 1. * n_w1_w2_w3 / N
|
291 |
+
True
|
292 |
+
>>> uni_counts2 = (n_w1, n_w2, 100)
|
293 |
+
>>> tam.student_t(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.student_t(n_w1_w2_w3, pair_counts, uni_counts, N)
|
294 |
+
True
|
295 |
+
>>> tam.chi_sq(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.chi_sq(n_w1_w2_w3, pair_counts, uni_counts, N)
|
296 |
+
True
|
297 |
+
>>> tam.mi_like(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.mi_like(n_w1_w2_w3, pair_counts, uni_counts, N)
|
298 |
+
True
|
299 |
+
>>> tam.pmi(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.pmi(n_w1_w2_w3, pair_counts, uni_counts, N)
|
300 |
+
True
|
301 |
+
>>> tam.likelihood_ratio(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.likelihood_ratio(n_w1_w2_w3, pair_counts, uni_counts, N)
|
302 |
+
True
|
303 |
+
>>> tam.poisson_stirling(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.poisson_stirling(n_w1_w2_w3, pair_counts, uni_counts, N)
|
304 |
+
True
|
305 |
+
>>> tam.jaccard(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.jaccard(n_w1_w2_w3, pair_counts, uni_counts, N)
|
306 |
+
True
|
307 |
+
|
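By analogy with the bigram case (another added check, assuming the base-2
convention generalizes to trigrams as
``log2(n_iii * N**2 / (n_w1 * n_w2 * n_w3))``):

>>> by_hand = math.log(n_w1_w2_w3 * N ** 2 / (n_w1 * n_w2 * n_w3), 2)
>>> math.isclose(tam.pmi(n_w1_w2_w3, pair_counts, uni_counts, N), by_hand)
True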


For fourgrams, we have to provide more count information:

>>> n_w1_w2_w3_w4 = 5
>>> n_w1_w2, n_w1_w3, n_w2_w3 = 35, 60, 40
>>> n_w1_w2_w3, n_w2_w3_w4 = 20, 10
>>> pair_counts = (n_w1_w2, n_w1_w3, n_w2_w3)
>>> triplet_counts = (n_w1_w2_w3, n_w2_w3_w4)
>>> n_w1, n_w2, n_w3, n_w4 = 100, 200, 300, 400
>>> uni_counts = (n_w1, n_w2, n_w3, n_w4)
>>> N = 14307668
>>> qam = QuadgramAssocMeasures
>>> qam.raw_freq(n_w1_w2_w3_w4, pair_counts, triplet_counts, uni_counts, N) == 1. * n_w1_w2_w3_w4 / N
True
lib/python3.10/site-packages/nltk/test/misc.doctest
ADDED
@@ -0,0 +1,118 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

--------------------------------------------------------------------------------
Unit tests for the miscellaneous sort functions.
--------------------------------------------------------------------------------

>>> from copy import deepcopy
>>> from nltk.misc.sort import *

A (very) small list of unsorted integers.

>>> test_data = [12, 67, 7, 28, 92, 56, 53, 720, 91, 57, 20, 20]

Test each sorting method - each method returns the number of operations
required to sort the data, and sorts in-place (destructively - hence the need
for multiple copies).

>>> sorted_data = deepcopy(test_data)
>>> selection(sorted_data)
66

>>> sorted_data
[7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]

>>> sorted_data = deepcopy(test_data)
>>> bubble(sorted_data)
30

>>> sorted_data
[7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]

>>> sorted_data = deepcopy(test_data)
>>> merge(sorted_data)
30

>>> sorted_data
[7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]

>>> sorted_data = deepcopy(test_data)
>>> quick(sorted_data)
13

>>> sorted_data
[7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]

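The same checks can be driven from a loop; this added snippet merely confirms
that all four routines agree with Python's built-in ``sorted()``:

>>> for sort_fn in (selection, bubble, merge, quick):
...     data = deepcopy(test_data)
...     count = sort_fn(data)    # the operation count is ignored here
...     assert data == sorted(test_data)
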
--------------------------------------------------------------------------------
Unit tests for Wordfinder class
--------------------------------------------------------------------------------

>>> import random

>>> # The following is not enough for reproducibility under Python 2/3
>>> # (see https://bugs.python.org/issue9025) so this test is skipped.
>>> random.seed(12345)

>>> from nltk.misc import wordfinder
>>> wordfinder.word_finder() # doctest: +SKIP
Word Finder
<BLANKLINE>
J V L A I R O T A T I S I V O D E R E T
H U U B E A R O E P O C S O R E T N E P
A D A U Z E E S R A P P A L L M E N T R
C X A D Q S Z T P E O R S N G P J A D E
I G Y K K T I A A R G F I D T E L C N S
R E C N B H T R L T N N B W N T A O A I
A Y I L O E I A M E I A A Y U R P L L D
G L T V S T S F E A D I P H D O O H N I
R L S E C I N I L R N N M E C G R U E A
A A Y G I C E N L L E O I G Q R T A E L
M R C E T I S T A E T L L E U A E N R L
O U O T A S E E C S O O N H Y P A T G Y
E M H O M M D R E S F P U L T H C F N V
L A C A I M A M A N L B R U T E D O M I
O R I L N E E E E E U A R S C R Y L I P
H T R K E S N N M S I L A S R E V I N U
T X T A A O U T K S E T A R R E S I B J
A E D L E L J I F O O R P E L K N I R W
K H A I D E Q O P R I C K T I M B E R P
Z K D O O H G N I H T U R V E Y D R O P
<BLANKLINE>
1: INTERCHANGER
2: TEARLESSNESS
3: UNIVERSALISM
4: DESENSITIZER
5: INTERMENTION
6: TRICHOCYSTIC
7: EXTRAMURALLY
8: VEGETOALKALI
9: PALMELLACEAE
10: AESTHETICISM
11: PETROGRAPHER
12: VISITATORIAL
13: OLEOMARGARIC
14: WRINKLEPROOF
15: PRICKTIMBER
16: PRESIDIALLY
17: SCITAMINEAE
18: ENTEROSCOPE
19: APPALLMENT
20: TURVEYDROP
21: THINGHOOD
22: BISERRATE
23: GREENLAND
24: BRUTEDOM
25: POLONIAN
26: ACOLHUAN
27: LAPORTEA
28: TENDING
29: TEREDO
30: MESOLE
31: UNLIMP
32: OSTARA
33: PILY
34: DUNT
35: ONYX
36: KATH
37: JUNE
lib/python3.10/site-packages/nltk/test/probability.doctest
ADDED
@@ -0,0 +1,306 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

===========
Probability
===========

>>> from nltk.test.probability_fixt import setup_module
>>> setup_module()

>>> import nltk
>>> from nltk.probability import *

FreqDist
--------

>>> text1 = ['no', 'good', 'fish', 'goes', 'anywhere', 'without', 'a', 'porpoise', '!']
>>> text2 = ['no', 'good', 'porpoise', 'likes', 'to', 'fish', 'fish', 'anywhere', '.']

>>> fd1 = nltk.FreqDist(text1)
>>> fd1 == nltk.FreqDist(text1)
True

Note that items are sorted in order of decreasing frequency; two items of the same frequency appear in indeterminate order.

>>> import itertools
>>> both = nltk.FreqDist(text1 + text2)
>>> both_most_common = both.most_common()
>>> list(itertools.chain(*(sorted(ys) for k, ys in itertools.groupby(both_most_common, key=lambda t: t[1]))))
[('fish', 3), ('anywhere', 2), ('good', 2), ('no', 2), ('porpoise', 2), ('!', 1), ('.', 1), ('a', 1), ('goes', 1), ('likes', 1), ('to', 1), ('without', 1)]

>>> both == fd1 + nltk.FreqDist(text2)
True
>>> fd1 == nltk.FreqDist(text1) # But fd1 is unchanged
True

>>> fd2 = nltk.FreqDist(text2)
>>> fd1.update(fd2)
>>> fd1 == both
True

>>> fd1 = nltk.FreqDist(text1)
>>> fd1.update(text2)
>>> fd1 == both
True

>>> fd1 = nltk.FreqDist(text1)
>>> fd2 = nltk.FreqDist(fd1)
>>> fd2 == fd1
True

``nltk.FreqDist`` can be pickled:

>>> import pickle
>>> fd1 = nltk.FreqDist(text1)
>>> pickled = pickle.dumps(fd1)
>>> fd1 == pickle.loads(pickled)
True

Mathematical operations:

>>> FreqDist('abbb') + FreqDist('bcc')
FreqDist({'b': 4, 'c': 2, 'a': 1})
>>> FreqDist('abbbc') - FreqDist('bccd')
FreqDist({'b': 2, 'a': 1})
>>> FreqDist('abbb') | FreqDist('bcc')
FreqDist({'b': 3, 'c': 2, 'a': 1})
>>> FreqDist('abbb') & FreqDist('bcc')
FreqDist({'b': 1})

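In other words (an added note): as with ``collections.Counter``, ``+`` adds the
counts per sample while ``|`` keeps the per-sample maximum, e.g. for ``'b'``:

>>> (FreqDist('abbb') + FreqDist('bcc'))['b'], (FreqDist('abbb') | FreqDist('bcc'))['b']
(4, 3)
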
ConditionalFreqDist
-------------------

>>> cfd1 = ConditionalFreqDist()
>>> cfd1[1] = FreqDist('abbbb')
>>> cfd1[2] = FreqDist('xxxxyy')
>>> cfd1
<ConditionalFreqDist with 2 conditions>

>>> cfd2 = ConditionalFreqDist()
>>> cfd2[1] = FreqDist('bbccc')
>>> cfd2[2] = FreqDist('xxxyyyzz')
>>> cfd2[3] = FreqDist('m')
>>> cfd2
<ConditionalFreqDist with 3 conditions>

>>> r = cfd1 + cfd2
>>> [(i,r[i]) for i in r.conditions()]
[(1, FreqDist({'b': 6, 'c': 3, 'a': 1})), (2, FreqDist({'x': 7, 'y': 5, 'z': 2})), (3, FreqDist({'m': 1}))]

>>> r = cfd1 - cfd2
>>> [(i,r[i]) for i in r.conditions()]
[(1, FreqDist({'b': 2, 'a': 1})), (2, FreqDist({'x': 1}))]

>>> r = cfd1 | cfd2
>>> [(i,r[i]) for i in r.conditions()]
[(1, FreqDist({'b': 4, 'c': 3, 'a': 1})), (2, FreqDist({'x': 4, 'y': 3, 'z': 2})), (3, FreqDist({'m': 1}))]

>>> r = cfd1 & cfd2
>>> [(i,r[i]) for i in r.conditions()]
[(1, FreqDist({'b': 2})), (2, FreqDist({'x': 3, 'y': 2}))]

Testing some HMM estimators
---------------------------

We extract a small part (500 sentences) of the Brown corpus:

>>> corpus = nltk.corpus.brown.tagged_sents(categories='adventure')[:500]
>>> print(len(corpus))
500

We create an HMM trainer - note that we need the tags and symbols
from the whole corpus, not just the training corpus:

>>> from nltk.util import unique_list
>>> tag_set = unique_list(tag for sent in corpus for (word,tag) in sent)
>>> print(len(tag_set))
92
>>> symbols = unique_list(word for sent in corpus for (word,tag) in sent)
>>> print(len(symbols))
1464
>>> trainer = nltk.tag.HiddenMarkovModelTrainer(tag_set, symbols)

We divide the corpus into 90% training and 10% testing

>>> train_corpus = []
>>> test_corpus = []
>>> for i in range(len(corpus)):
...     if i % 10:
...         train_corpus += [corpus[i]]
...     else:
...         test_corpus += [corpus[i]]
>>> print(len(train_corpus))
450
>>> print(len(test_corpus))
50

And now we can test the estimators

>>> def train_and_test(est):
...     hmm = trainer.train_supervised(train_corpus, estimator=est)
...     print('%.2f%%' % (100 * hmm.accuracy(test_corpus)))

Maximum Likelihood Estimation
-----------------------------
- this resulted in an initialization error before r7209

>>> mle = lambda fd, bins: MLEProbDist(fd)
>>> train_and_test(mle)
22.75%

Laplace (= Lidstone with gamma==1)

>>> train_and_test(LaplaceProbDist)
66.04%

Expected Likelihood Estimation (= Lidstone with gamma==0.5)

>>> train_and_test(ELEProbDist)
73.01%

Lidstone Estimation, for gamma==0.1, 0.5 and 1
(the latter two should be exactly equal to ELE and Laplace above)

>>> def lidstone(gamma):
...     return lambda fd, bins: LidstoneProbDist(fd, gamma, bins)
>>> train_and_test(lidstone(0.1))
82.51%
>>> train_and_test(lidstone(0.5))
73.01%
>>> train_and_test(lidstone(1.0))
66.04%

Witten Bell Estimation
----------------------
- This resulted in ZeroDivisionError before r7209

>>> train_and_test(WittenBellProbDist)
88.12%

Good Turing Estimation

>>> gt = lambda fd, bins: SimpleGoodTuringProbDist(fd, bins=1e5)
>>> train_and_test(gt)
86.93%

Kneser Ney Estimation
---------------------
Since the Kneser-Ney distribution is best suited for trigrams, we must adjust
our testing accordingly.

>>> corpus = [[((x[0],y[0],z[0]),(x[1],y[1],z[1]))
...            for x, y, z in nltk.trigrams(sent)]
...           for sent in corpus[:100]]

We will then need to redefine the rest of the training/testing variables

>>> tag_set = unique_list(tag for sent in corpus for (word,tag) in sent)
>>> len(tag_set)
906

>>> symbols = unique_list(word for sent in corpus for (word,tag) in sent)
>>> len(symbols)
1341

>>> trainer = nltk.tag.HiddenMarkovModelTrainer(tag_set, symbols)
>>> train_corpus = []
>>> test_corpus = []

>>> for i in range(len(corpus)):
...     if i % 10:
...         train_corpus += [corpus[i]]
...     else:
...         test_corpus += [corpus[i]]

>>> len(train_corpus)
90
>>> len(test_corpus)
10

>>> kn = lambda fd, bins: KneserNeyProbDist(fd)
>>> train_and_test(kn)
0.86%

Remains to be added:
- Tests for HeldoutProbDist, CrossValidationProbDist and MutableProbDist

Squashed bugs
-------------

Issue 511: override pop and popitem to invalidate the cache

>>> fd = nltk.FreqDist('a')
>>> list(fd.keys())
['a']
>>> fd.pop('a')
1
>>> list(fd.keys())
[]

Issue 533: access cumulative frequencies with no arguments

>>> fd = nltk.FreqDist('aab')
>>> list(fd._cumulative_frequencies(['a']))
[2.0]
>>> list(fd._cumulative_frequencies(['a', 'b']))
[2.0, 3.0]

Issue 579: override clear to reset some variables

>>> fd = FreqDist('aab')
>>> fd.clear()
>>> fd.N()
0

Issue 351: fix the fileids method of CategorizedCorpusReader so that it
does not inadvertently add errant categories

>>> from nltk.corpus import brown
>>> brown.fileids('blah')
Traceback (most recent call last):
...
ValueError: Category blah not found
>>> brown.categories()
['adventure', 'belles_lettres', 'editorial', 'fiction', 'government', 'hobbies', 'humor', 'learned', 'lore', 'mystery', 'news', 'religion', 'reviews', 'romance', 'science_fiction']

Issue 175: add the unseen bin to SimpleGoodTuringProbDist by default;
otherwise any unseen events get a probability of zero, i.e.,
they don't get smoothed

>>> from nltk import SimpleGoodTuringProbDist, FreqDist
>>> fd = FreqDist({'a':1, 'b':1, 'c': 2, 'd': 3, 'e': 4, 'f': 4, 'g': 4, 'h': 5, 'i': 5, 'j': 6, 'k': 6, 'l': 6, 'm': 7, 'n': 7, 'o': 8, 'p': 9, 'q': 10})
>>> p = SimpleGoodTuringProbDist(fd)
>>> p.prob('a')
0.017649766667026317...
>>> p.prob('o')
0.08433050215340411...
>>> p.prob('z')
0.022727272727272728...
>>> p.prob('foobar')
0.022727272727272728...

``MLEProbDist``, ``ConditionalProbDist``, ``DictionaryConditionalProbDist`` and
``ConditionalFreqDist`` can be pickled:

>>> import pickle
>>> pd = MLEProbDist(fd)
>>> sorted(pd.samples()) == sorted(pickle.loads(pickle.dumps(pd)).samples())
True
>>> dpd = DictionaryConditionalProbDist({'x': pd})
>>> unpickled = pickle.loads(pickle.dumps(dpd))
>>> dpd['x'].prob('a')
0.011363636...
>>> dpd['x'].prob('a') == unpickled['x'].prob('a')
True
>>> cfd = nltk.probability.ConditionalFreqDist()
>>> cfd['foo']['hello'] += 1
>>> cfd['foo']['hello'] += 1
>>> cfd['bar']['hello'] += 1
>>> cfd2 = pickle.loads(pickle.dumps(cfd))
>>> cfd2 == cfd
True
>>> cpd = ConditionalProbDist(cfd, SimpleGoodTuringProbDist)
>>> cpd2 = pickle.loads(pickle.dumps(cpd))
>>> cpd['foo'].prob('hello') == cpd2['foo'].prob('hello')
True
lib/python3.10/site-packages/nltk/test/propbank.doctest
ADDED
@@ -0,0 +1,176 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

========
PropBank
========

The PropBank Corpus provides predicate-argument annotation for the
entire Penn Treebank. Each verb in the treebank is annotated by a single
instance in PropBank, containing information about the location of
the verb, and the location and identity of its arguments:

>>> from nltk.corpus import propbank
>>> pb_instances = propbank.instances()
>>> print(pb_instances)
[<PropbankInstance: wsj_0001.mrg, sent 0, word 8>,
 <PropbankInstance: wsj_0001.mrg, sent 1, word 10>, ...]

Each propbank instance defines the following member variables:

- Location information: `fileid`, `sentnum`, `wordnum`
- Annotator information: `tagger`
- Inflection information: `inflection`
- Roleset identifier: `roleset`
- Verb (aka predicate) location: `predicate`
- Argument locations and types: `arguments`

The following examples show the types of these arguments:

>>> inst = pb_instances[103]
>>> (inst.fileid, inst.sentnum, inst.wordnum)
('wsj_0004.mrg', 8, 16)
>>> inst.tagger
'gold'
>>> inst.inflection
<PropbankInflection: vp--a>
>>> infl = inst.inflection
>>> infl.form, infl.tense, infl.aspect, infl.person, infl.voice
('v', 'p', '-', '-', 'a')
>>> inst.roleset
'rise.01'
>>> inst.predicate
PropbankTreePointer(16, 0)
>>> inst.arguments
((PropbankTreePointer(0, 2), 'ARG1'),
 (PropbankTreePointer(13, 1), 'ARGM-DIS'),
 (PropbankTreePointer(17, 1), 'ARG4-to'),
 (PropbankTreePointer(20, 1), 'ARG3-from'))

The locations of the predicate and of the arguments are encoded using
`PropbankTreePointer` objects, as well as `PropbankChainTreePointer`
objects and `PropbankSplitTreePointer` objects. A
`PropbankTreePointer` consists of a `wordnum` and a `height`:

>>> print(inst.predicate.wordnum, inst.predicate.height)
16 0

This identifies the tree constituent that is headed by the word that
is the `wordnum`\ 'th token in the sentence, and whose span is found
by going `height` nodes up in the tree. This type of pointer is only
useful if we also have the corresponding tree structure, since it
includes empty elements such as traces in the word number count. The
trees for 10% of the standard PropBank Corpus are contained in the
`treebank` corpus:

>>> tree = inst.tree

>>> from nltk.corpus import treebank
>>> assert tree == treebank.parsed_sents(inst.fileid)[inst.sentnum]

>>> inst.predicate.select(tree)
Tree('VBD', ['rose'])
>>> for (argloc, argid) in inst.arguments:
...     print('%-10s %s' % (argid, argloc.select(tree).pformat(500)[:50]))
ARG1       (NP-SBJ (NP (DT The) (NN yield)) (PP (IN on) (NP (
ARGM-DIS   (PP (IN for) (NP (NN example)))
ARG4-to    (PP-DIR (TO to) (NP (CD 8.04) (NN %)))
ARG3-from  (PP-DIR (IN from) (NP (CD 7.90) (NN %)))

Propbank tree pointers can be converted to standard tree locations,
which are usually easier to work with, using the `treepos()` method:

>>> treepos = inst.predicate.treepos(tree)
>>> print (treepos, tree[treepos])
(4, 0) (VBD rose)

In some cases, argument locations will be encoded using
`PropbankChainTreePointer`\ s (for trace chains) or
`PropbankSplitTreePointer`\ s (for discontinuous constituents). Both
of these objects contain a single member variable, `pieces`,
containing a list of the constituent pieces. They also define the
method `select()`, which will return a tree containing all the
elements of the argument. (A new head node is created, labeled
"*CHAIN*" or "*SPLIT*", since the argument is not a single constituent
in the original tree). Sentence #6 contains an example of an argument
that is both discontinuous and contains a chain:

>>> inst = pb_instances[6]
>>> inst.roleset
'expose.01'
>>> argloc, argid = inst.arguments[2]
>>> argloc
<PropbankChainTreePointer: 22:1,24:0,25:1*27:0>
>>> argloc.pieces
[<PropbankSplitTreePointer: 22:1,24:0,25:1>, PropbankTreePointer(27, 0)]
>>> argloc.pieces[0].pieces
...
[PropbankTreePointer(22, 1), PropbankTreePointer(24, 0),
 PropbankTreePointer(25, 1)]
>>> print(argloc.select(inst.tree))
(*CHAIN*
  (*SPLIT* (NP (DT a) (NN group)) (IN of) (NP (NNS workers)))
  (-NONE- *))

The PropBank Corpus also provides access to the frameset files, which
define the argument labels used by the annotations, on a per-verb
basis. Each frameset file contains one or more predicates, such as
'turn' or 'turn_on', each of which is divided into coarse-grained word
senses called rolesets. For each roleset, the frameset file provides
descriptions of the argument roles, along with examples.

>>> expose_01 = propbank.roleset('expose.01')
>>> turn_01 = propbank.roleset('turn.01')
>>> print(turn_01)
<Element 'roleset' at ...>
>>> for role in turn_01.findall("roles/role"):
...     print(role.attrib['n'], role.attrib['descr'])
0 turner
1 thing turning
m direction, location

>>> from xml.etree import ElementTree
>>> print(ElementTree.tostring(turn_01.find('example')).decode('utf8').strip())
<example name="transitive agentive">
<text>
John turned the key in the lock.
</text>
<arg n="0">John</arg>
<rel>turned</rel>
<arg n="1">the key</arg>
<arg f="LOC" n="m">in the lock</arg>
</example>

Note that the standard corpus distribution only contains 10% of the
treebank, so the parse trees are not available for instances starting
at 9353:

>>> inst = pb_instances[9352]
>>> inst.fileid
'wsj_0199.mrg'
>>> print(inst.tree)
(S (NP-SBJ (NNP Trinity)) (VP (VBD said) (SBAR (-NONE- 0) ...))
>>> print(inst.predicate.select(inst.tree))
(VB begin)

>>> inst = pb_instances[9353]
>>> inst.fileid
'wsj_0200.mrg'
>>> print(inst.tree)
None
>>> print(inst.predicate.select(inst.tree))
Traceback (most recent call last):
. . .
ValueError: Parse tree not available

However, if you supply your own version of the treebank corpus (by
putting it before the nltk-provided version on `nltk.data.path`, or
by creating a `ptb` directory as described above and using the
`propbank_ptb` module), then you can access the trees for all
instances.

A list of the verb lemmas contained in PropBank is returned by the
`propbank.verbs()` method:

>>> propbank.verbs()
['abandon', 'abate', 'abdicate', 'abet', 'abide', ...]
lib/python3.10/site-packages/nltk/test/relextract.doctest
ADDED
@@ -0,0 +1,263 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

======================
Information Extraction
======================

Information Extraction standardly consists of three subtasks:

#. Named Entity Recognition

#. Relation Extraction

#. Template Filling

Named Entities
~~~~~~~~~~~~~~

The IEER corpus is marked up for a variety of Named Entities. A Named
Entity (more strictly, a Named Entity mention) is a name of an
entity belonging to a specified class. For example, the Named Entity
classes in IEER include PERSON, LOCATION, ORGANIZATION, DATE and so
on. Within NLTK, Named Entities are represented as subtrees within a
chunk structure: the class name is treated as the node label, while the
entity mention itself appears as the leaves of the subtree. This is
illustrated below, where we show an extract of the chunk
representation of document NYT_19980315.064:

>>> from nltk.corpus import ieer
>>> docs = ieer.parsed_docs('NYT_19980315')
>>> tree = docs[1].text
>>> print(tree)
(DOCUMENT
...
  ``It's
  a
  chance
  to
  think
  about
  first-level
  questions,''
  said
  Ms.
  (PERSON Cohn)
  ,
  a
  partner
  in
  the
  (ORGANIZATION McGlashan & Sarrail)
  firm
  in
  (LOCATION San Mateo)
  ,
  (LOCATION Calif.)
  ...)

Thus, the Named Entity mentions in this example are *Cohn*, *McGlashan &
Sarrail*, *San Mateo* and *Calif.*.

The CoNLL2002 Dutch and Spanish data is treated similarly, although in
this case, the strings are also POS tagged.

>>> from nltk.corpus import conll2002
>>> for doc in conll2002.chunked_sents('ned.train')[27]:
...     print(doc)
('Het', 'Art')
(ORG Hof/N van/Prep Cassatie/N)
('verbrak', 'V')
('het', 'Art')
('arrest', 'N')
('zodat', 'Conj')
('het', 'Pron')
('moest', 'V')
('worden', 'V')
('overgedaan', 'V')
('door', 'Prep')
('het', 'Art')
('hof', 'N')
('van', 'Prep')
('beroep', 'N')
('van', 'Prep')
(LOC Antwerpen/N)
('.', 'Punc')

Relation Extraction
~~~~~~~~~~~~~~~~~~~

Relation Extraction standardly consists of identifying specified
relations between Named Entities. For example, assuming that we can
recognize ORGANIZATIONs and LOCATIONs in text, we might want to also
recognize pairs *(o, l)* of these kinds of entities such that *o* is
located in *l*.

The `sem.relextract` module provides some tools to help carry out a
simple version of this task. The `tree2semi_rel()` function splits a chunk
document into a list of two-member lists, each of which consists of a
(possibly empty) string followed by a `Tree` (i.e., a Named Entity):

>>> from nltk.sem import relextract
>>> pairs = relextract.tree2semi_rel(tree)
>>> for s, tree in pairs[18:22]:
...     print('("...%s", %s)' % (" ".join(s[-5:]),tree))
("...about first-level questions,'' said Ms.", (PERSON Cohn))
("..., a partner in the", (ORGANIZATION McGlashan & Sarrail))
("...firm in", (LOCATION San Mateo))
("...,", (LOCATION Calif.))

The function `semi_rel2reldict()` processes triples of these pairs, i.e.,
pairs of the form ``((string1, Tree1), (string2, Tree2), (string3,
Tree3))`` and outputs a dictionary (a `reldict`) in which ``Tree1`` is
the subject of the relation, ``string2`` is the filler
and ``Tree3`` is the object of the relation. ``string1`` and ``string3`` are
stored as left and right context respectively.

>>> reldicts = relextract.semi_rel2reldict(pairs)
>>> for k, v in sorted(reldicts[0].items()):
...     print(k, '=>', v)
filler => of messages to their own ``Cyberia'' ...
lcon => transactions.'' Each week, they post
objclass => ORGANIZATION
objsym => white_house
objtext => White House
rcon => for access to its planned
subjclass => CARDINAL
subjsym => hundreds
subjtext => hundreds
untagged_filler => of messages to their own ``Cyberia'' ...

The next example shows some of the values for two `reldict`\ s
corresponding to the ``'NYT_19980315'`` text extract shown earlier.

>>> for r in reldicts[18:20]:
...     print('=' * 20)
...     print(r['subjtext'])
...     print(r['filler'])
...     print(r['objtext'])
====================
Cohn
, a partner in the
McGlashan & Sarrail
====================
McGlashan & Sarrail
firm in
San Mateo

The function `relextract()` allows us to filter the `reldict`\ s
according to the classes of the subject and object named entities. In
addition, we can specify that the filler text has to match a given
regular expression, as illustrated in the next example. Here, we are
looking for pairs of entities in the IN relation, where IN has
signature <ORG, LOC>.

>>> import re
>>> IN = re.compile(r'.*\bin\b(?!\b.+ing\b)')
>>> for fileid in ieer.fileids():
...     for doc in ieer.parsed_docs(fileid):
...         for rel in relextract.extract_rels('ORG', 'LOC', doc, corpus='ieer', pattern = IN):
...             print(relextract.rtuple(rel))
[ORG: 'Christian Democrats'] ', the leading political forces in' [LOC: 'Italy']
[ORG: 'AP'] ') _ Lebanese guerrillas attacked Israeli forces in southern' [LOC: 'Lebanon']
[ORG: 'Security Council'] 'adopted Resolution 425. Huge yellow banners hung across intersections in' [LOC: 'Beirut']
[ORG: 'U.N.'] 'failures in' [LOC: 'Africa']
[ORG: 'U.N.'] 'peacekeeping operation in' [LOC: 'Somalia']
[ORG: 'U.N.'] 'partners on a more effective role in' [LOC: 'Africa']
[ORG: 'AP'] ') _ A bomb exploded in a mosque in central' [LOC: 'San`a']
[ORG: 'Krasnoye Sormovo'] 'shipyard in the Soviet city of' [LOC: 'Gorky']
[ORG: 'Kelab Golf Darul Ridzuan'] 'in' [LOC: 'Perak']
[ORG: 'U.N.'] 'peacekeeping operation in' [LOC: 'Somalia']
[ORG: 'WHYY'] 'in' [LOC: 'Philadelphia']
[ORG: 'McGlashan & Sarrail'] 'firm in' [LOC: 'San Mateo']
[ORG: 'Freedom Forum'] 'in' [LOC: 'Arlington']
[ORG: 'Brookings Institution'] ', the research group in' [LOC: 'Washington']
[ORG: 'Idealab'] ', a self-described business incubator based in' [LOC: 'Los Angeles']
[ORG: 'Open Text'] ', based in' [LOC: 'Waterloo']
...

The next example illustrates a case where the pattern is a disjunction
of roles that a PERSON can occupy in an ORGANIZATION.

>>> roles = r"""
... (.*(
... analyst|
... chair(wo)?man|
... commissioner|
... counsel|
... director|
... economist|
... editor|
... executive|
... foreman|
... governor|
... head|
... lawyer|
... leader|
... librarian).*)|
... manager|
... partner|
... president|
... producer|
... professor|
... researcher|
... spokes(wo)?man|
... writer|
... ,\sof\sthe?\s* # "X, of (the) Y"
... """
>>> ROLES = re.compile(roles, re.VERBOSE)
>>> for fileid in ieer.fileids():
...     for doc in ieer.parsed_docs(fileid):
...         for rel in relextract.extract_rels('PER', 'ORG', doc, corpus='ieer', pattern=ROLES):
...             print(relextract.rtuple(rel))
[PER: 'Kivutha Kibwana'] ', of the' [ORG: 'National Convention Assembly']
[PER: 'Boban Boskovic'] ', chief executive of the' [ORG: 'Plastika']
[PER: 'Annan'] ', the first sub-Saharan African to head the' [ORG: 'United Nations']
[PER: 'Kiriyenko'] 'became a foreman at the' [ORG: 'Krasnoye Sormovo']
[PER: 'Annan'] ', the first sub-Saharan African to head the' [ORG: 'United Nations']
[PER: 'Mike Godwin'] ', chief counsel for the' [ORG: 'Electronic Frontier Foundation']
...

In the case of the CoNLL2002 data, we can include POS tags in the
query pattern. This example also illustrates how the output can be
presented as something that looks more like a clause in a logical language.

>>> de = """
... .*
... (
... de/SP|
... del/SP
... )
... """
>>> DE = re.compile(de, re.VERBOSE)
>>> rels = [rel for doc in conll2002.chunked_sents('esp.train')
...         for rel in relextract.extract_rels('ORG', 'LOC', doc, corpus='conll2002', pattern = DE)]
>>> for r in rels[:10]:
...     print(relextract.clause(r, relsym='DE'))
DE('tribunal_supremo', 'victoria')
DE('museo_de_arte', 'alcorc\xf3n')
DE('museo_de_bellas_artes', 'a_coru\xf1a')
DE('siria', 'l\xedbano')
DE('uni\xf3n_europea', 'pek\xedn')
DE('ej\xe9rcito', 'rogberi')
DE('juzgado_de_instrucci\xf3n_n\xfamero_1', 'san_sebasti\xe1n')
DE('psoe', 'villanueva_de_la_serena')
DE('ej\xe9rcito', 'l\xedbano')
DE('juzgado_de_lo_penal_n\xfamero_2', 'ceuta')
>>> vnv = """
... (
... is/V|
... was/V|
... werd/V|
... wordt/V
... )
... .*
... van/Prep
... """
>>> VAN = re.compile(vnv, re.VERBOSE)
>>> for doc in conll2002.chunked_sents('ned.train'):
...     for r in relextract.extract_rels('PER', 'ORG', doc, corpus='conll2002', pattern=VAN):
...         print(relextract.clause(r, relsym="VAN"))
VAN("cornet_d'elzius", 'buitenlandse_handel')
VAN('johan_rottiers', 'kardinaal_van_roey_instituut')
VAN('annie_lennox', 'eurythmics')
lib/python3.10/site-packages/nltk/test/resolution.doctest
ADDED
@@ -0,0 +1,222 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

=========================
Resolution Theorem Prover
=========================

>>> from nltk.inference.resolution import *
>>> from nltk.sem import logic
>>> from nltk.sem.logic import *
>>> logic._counter._value = 0
>>> read_expr = logic.Expression.fromstring

>>> P = read_expr('P')
>>> Q = read_expr('Q')
>>> R = read_expr('R')
>>> A = read_expr('A')
>>> B = read_expr('B')
>>> x = read_expr('x')
>>> y = read_expr('y')
>>> z = read_expr('z')

-------------------------------
Test most_general_unification()
-------------------------------
>>> print(most_general_unification(x, x))
{}
>>> print(most_general_unification(A, A))
{}
>>> print(most_general_unification(A, x))
{x: A}
>>> print(most_general_unification(x, A))
{x: A}
>>> print(most_general_unification(x, y))
{x: y}
>>> print(most_general_unification(P(x), P(A)))
{x: A}
>>> print(most_general_unification(P(x,B), P(A,y)))
{x: A, y: B}
>>> print(most_general_unification(P(x,B), P(B,x)))
{x: B}
>>> print(most_general_unification(P(x,y), P(A,x)))
{x: A, y: x}
>>> print(most_general_unification(P(Q(x)), P(y)))
{y: Q(x)}

------------
Test unify()
------------
>>> print(Clause([]).unify(Clause([])))
[]
>>> print(Clause([P(x)]).unify(Clause([-P(A)])))
[{}]
>>> print(Clause([P(A), Q(x)]).unify(Clause([-P(x), R(x)])))
[{R(A), Q(A)}]
>>> print(Clause([P(A), Q(x), R(x,y)]).unify(Clause([-P(x), Q(y)])))
[{Q(y), Q(A), R(A,y)}]
>>> print(Clause([P(A), -Q(y)]).unify(Clause([-P(x), Q(B)])))
[{}]
>>> print(Clause([P(x), Q(x)]).unify(Clause([-P(A), -Q(B)])))
[{-Q(B), Q(A)}, {-P(A), P(B)}]
>>> print(Clause([P(x,x), Q(x), R(x)]).unify(Clause([-P(A,z), -Q(B)])))
[{-Q(B), Q(A), R(A)}, {-P(A,z), R(B), P(B,B)}]

>>> a = clausify(read_expr('P(A)'))
>>> b = clausify(read_expr('A=B'))
>>> print(a[0].unify(b[0]))
[{P(B)}]

-------------------------
Test is_tautology()
-------------------------
>>> print(Clause([P(A), -P(A)]).is_tautology())
True
>>> print(Clause([-P(A), P(A)]).is_tautology())
True
>>> print(Clause([P(x), -P(A)]).is_tautology())
False
>>> print(Clause([Q(B), -P(A), P(A)]).is_tautology())
True
>>> print(Clause([-Q(A), P(R(A)), -P(R(A)), Q(x), -R(y)]).is_tautology())
True
>>> print(Clause([P(x), -Q(A)]).is_tautology())
False

-------------------------
Test subsumes()
-------------------------
>>> print(Clause([P(A), Q(B)]).subsumes(Clause([P(A), Q(B)])))
True
>>> print(Clause([-P(A)]).subsumes(Clause([P(A)])))
False
>>> print(Clause([P(A), Q(B)]).subsumes(Clause([Q(B), P(A)])))
True
>>> print(Clause([P(A), Q(B)]).subsumes(Clause([Q(B), R(A), P(A)])))
True
>>> print(Clause([P(A), R(A), Q(B)]).subsumes(Clause([Q(B), P(A)])))
False
>>> print(Clause([P(x)]).subsumes(Clause([P(A)])))
True
>>> print(Clause([P(A)]).subsumes(Clause([P(x)])))
True

------------
Test prove()
------------
>>> print(ResolutionProverCommand(read_expr('man(x)')).prove())
False
>>> print(ResolutionProverCommand(read_expr('(man(x) -> man(x))')).prove())
True
>>> print(ResolutionProverCommand(read_expr('(man(x) -> --man(x))')).prove())
True
>>> print(ResolutionProverCommand(read_expr('-(man(x) & -man(x))')).prove())
True
>>> print(ResolutionProverCommand(read_expr('(man(x) | -man(x))')).prove())
True
>>> print(ResolutionProverCommand(read_expr('(man(x) -> man(x))')).prove())
True
>>> print(ResolutionProverCommand(read_expr('-(man(x) & -man(x))')).prove())
True
>>> print(ResolutionProverCommand(read_expr('(man(x) | -man(x))')).prove())
True
>>> print(ResolutionProverCommand(read_expr('(man(x) -> man(x))')).prove())
True
>>> print(ResolutionProverCommand(read_expr('(man(x) <-> man(x))')).prove())
True
>>> print(ResolutionProverCommand(read_expr('-(man(x) <-> -man(x))')).prove())
True
>>> print(ResolutionProverCommand(read_expr('all x.man(x)')).prove())
False
>>> print(ResolutionProverCommand(read_expr('-all x.some y.F(x,y) & some x.all y.(-F(x,y))')).prove())
False
>>> print(ResolutionProverCommand(read_expr('some x.all y.sees(x,y)')).prove())
False

>>> p1 = read_expr('all x.(man(x) -> mortal(x))')
>>> p2 = read_expr('man(Socrates)')
>>> c = read_expr('mortal(Socrates)')
>>> ResolutionProverCommand(c, [p1,p2]).prove()
True

>>> p1 = read_expr('all x.(man(x) -> walks(x))')
>>> p2 = read_expr('man(John)')
>>> c = read_expr('some y.walks(y)')
>>> ResolutionProverCommand(c, [p1,p2]).prove()
True

>>> p = read_expr('some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))')
>>> c = read_expr('some e0.walk(e0,mary)')
>>> ResolutionProverCommand(c, [p]).prove()
True

------------
Test proof()
------------
>>> p1 = read_expr('all x.(man(x) -> mortal(x))')
>>> p2 = read_expr('man(Socrates)')
>>> c = read_expr('mortal(Socrates)')
>>> logic._counter._value = 0
>>> tp = ResolutionProverCommand(c, [p1,p2])
>>> tp.prove()
True
>>> print(tp.proof())
[1] {-mortal(Socrates)} A
[2] {-man(z2), mortal(z2)} A
[3] {man(Socrates)} A
[4] {-man(Socrates)} (1, 2)
[5] {mortal(Socrates)} (2, 3)
[6] {} (1, 5)
<BLANKLINE>

------------------
Question Answering
------------------
One answer

>>> p1 = read_expr('father_of(art,john)')
>>> p2 = read_expr('father_of(bob,kim)')
>>> p3 = read_expr('all x.all y.(father_of(x,y) -> parent_of(x,y))')
>>> c = read_expr('all x.(parent_of(x,john) -> ANSWER(x))')
>>> logic._counter._value = 0
>>> tp = ResolutionProverCommand(None, [p1,p2,p3,c])
>>> sorted(tp.find_answers())
[<ConstantExpression art>]
>>> print(tp.proof()) # doctest: +SKIP
[1] {father_of(art,john)} A
[2] {father_of(bob,kim)} A
[3] {-father_of(z3,z4), parent_of(z3,z4)} A
[4] {-parent_of(z6,john), ANSWER(z6)} A
[5] {parent_of(art,john)} (1, 3)
[6] {parent_of(bob,kim)} (2, 3)
[7] {ANSWER(z6), -father_of(z6,john)} (3, 4)
[8] {ANSWER(art)} (1, 7)
[9] {ANSWER(art)} (4, 5)
<BLANKLINE>

Multiple answers

>>> p1 = read_expr('father_of(art,john)')
>>> p2 = read_expr('mother_of(ann,john)')
>>> p3 = read_expr('all x.all y.(father_of(x,y) -> parent_of(x,y))')
>>> p4 = read_expr('all x.all y.(mother_of(x,y) -> parent_of(x,y))')
>>> c = read_expr('all x.(parent_of(x,john) -> ANSWER(x))')
>>> logic._counter._value = 0
>>> tp = ResolutionProverCommand(None, [p1,p2,p3,p4,c])
>>> sorted(tp.find_answers())
[<ConstantExpression ann>, <ConstantExpression art>]
>>> print(tp.proof()) # doctest: +SKIP
[ 1] {father_of(art,john)} A
[ 2] {mother_of(ann,john)} A
[ 3] {-father_of(z3,z4), parent_of(z3,z4)} A
[ 4] {-mother_of(z7,z8), parent_of(z7,z8)} A
[ 5] {-parent_of(z10,john), ANSWER(z10)} A
[ 6] {parent_of(art,john)} (1, 3)
[ 7] {parent_of(ann,john)} (2, 4)
[ 8] {ANSWER(z10), -father_of(z10,john)} (3, 5)
[ 9] {ANSWER(art)} (1, 8)
[10] {ANSWER(z10), -mother_of(z10,john)} (4, 5)
[11] {ANSWER(ann)} (2, 10)
[12] {ANSWER(art)} (5, 6)
[13] {ANSWER(ann)} (5, 7)
<BLANKLINE>
lib/python3.10/site-packages/nltk/test/semantics.doctest
ADDED
@@ -0,0 +1,667 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

=========
Semantics
=========

>>> # Setup tests by setting the counter to 0
>>> from nltk.sem import logic
>>> logic._counter._value = 0

>>> import nltk
>>> from nltk.sem import Valuation, Model
>>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
...      ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
...      ('dog', set(['d1'])),
...      ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
>>> val = Valuation(v)
>>> dom = val.domain
>>> m = Model(dom, val)

Evaluation
----------

The top-level method of a ``Model`` instance is ``evaluate()``, which
assigns a semantic value to expressions of the ``logic`` module, under
an assignment ``g``:

>>> dom = val.domain
>>> g = nltk.sem.Assignment(dom)
>>> m.evaluate('all x.(boy(x) -> - girl(x))', g)
True


``evaluate()`` calls a recursive function ``satisfy()``, which in turn
calls a function ``i()`` to interpret non-logical constants and
individual variables. ``i()`` delegates the interpretation of these to
the model's ``Valuation`` and the variable assignment ``g``
respectively. Any atomic expression which cannot be assigned a value
by ``i`` raises an ``Undefined`` exception; this is caught by
``evaluate``, which returns the string ``'Undefined'``.

>>> m.evaluate('walk(adam)', g, trace=2)
<BLANKLINE>
'walk(adam)' is undefined under M, g
'Undefined'

Batch Processing
----------------

The utility functions ``interpret_sents()`` and ``evaluate_sents()`` are intended to
help with processing multiple sentences. Here's an example of the first of these:

>>> sents = ['Mary walks']
>>> results = nltk.sem.util.interpret_sents(sents, 'grammars/sample_grammars/sem2.fcfg')
>>> for result in results:
...     for (synrep, semrep) in result:
...         print(synrep)
(S[SEM=<walk(mary)>]
  (NP[-LOC, NUM='sg', SEM=<\P.P(mary)>]
    (PropN[-LOC, NUM='sg', SEM=<\P.P(mary)>] Mary))
  (VP[NUM='sg', SEM=<\x.walk(x)>]
    (IV[NUM='sg', SEM=<\x.walk(x)>, TNS='pres'] walks)))

In order to provide backwards compatibility with 'legacy' grammars where the semantics value
is specified with a lowercase
``sem`` feature, the relevant feature name can be passed to the function using the
``semkey`` parameter, as shown here:

>>> sents = ['raining']
>>> g = nltk.grammar.FeatureGrammar.fromstring("""
... % start S
... S[sem=<raining>] -> 'raining'
... """)
>>> results = nltk.sem.util.interpret_sents(sents, g, semkey='sem')
>>> for result in results:
...     for (synrep, semrep) in result:
...         print(semrep)
raining

The function ``evaluate_sents()`` works in a similar manner, but also needs to be
passed a ``Model`` against which the semantic representations are evaluated.

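For instance (an illustrative sketch rather than one of the original tests; it
assumes the ``Model`` ``m`` built at the top of this file, a fresh
``Assignment``, and the ``evaluate_sents(inputs, grammar, model, assignment)``
signature of ``nltk.sem.util``), each result triple pairs a syntax tree and a
semantic representation with that representation's truth value in the model:

>>> g2 = nltk.sem.Assignment(val.domain) # doctest: +SKIP
>>> results = nltk.sem.util.evaluate_sents(['Mary walks'], 'grammars/sample_grammars/sem2.fcfg', m, g2) # doctest: +SKIP
>>> for (synrep, semrep, value) in results[0]: # doctest: +SKIP
...     print(semrep, value)
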
Unit Tests
==========


Unit tests for relations and valuations
---------------------------------------

>>> from nltk.sem import *

Relations are sets of tuples, all of the same length.

>>> s1 = set([('d1', 'd2'), ('d1', 'd1'), ('d2', 'd1')])
>>> is_rel(s1)
True
>>> s2 = set([('d1', 'd2'), ('d1', 'd2'), ('d1',)])
>>> is_rel(s2)
Traceback (most recent call last):
. . .
ValueError: Set set([('d1', 'd2'), ('d1',)]) contains sequences of different lengths
>>> s3 = set(['d1', 'd2'])
>>> is_rel(s3)
Traceback (most recent call last):
. . .
ValueError: Set set(['d2', 'd1']) contains sequences of different lengths
>>> s4 = set2rel(s3)
>>> is_rel(s4)
True
>>> is_rel(set())
True
>>> null_binary_rel = set([(None, None)])
>>> is_rel(null_binary_rel)
True

Sets of entities are converted into sets of singleton tuples
(containing strings).

>>> sorted(set2rel(s3))
[('d1',), ('d2',)]
>>> sorted(set2rel(set([1,3,5,])))
['1', '3', '5']
>>> set2rel(set()) == set()
True
>>> set2rel(set2rel(s3)) == set2rel(s3)
True

Predication is evaluated by set membership.

>>> ('d1', 'd2') in s1
True
>>> ('d2', 'd2') in s1
False
>>> ('d1',) in s1
False
>>> 'd2' in s1
False
>>> ('d1',) in s4
True
>>> ('d1',) in set()
False
>>> 'd1' in null_binary_rel
False


>>> val = Valuation([('Fido', 'd1'), ('dog', set(['d1', 'd2'])), ('walk', set())])
>>> sorted(val['dog'])
[('d1',), ('d2',)]
>>> val.domain == set(['d1', 'd2'])
True
>>> print(val.symbols)
['Fido', 'dog', 'walk']


Parse a valuation from a string.

>>> v = """
... john => b1
... mary => g1
... suzie => g2
... fido => d1
... tess => d2
... noosa => n
... girl => {g1, g2}
... boy => {b1, b2}
... dog => {d1, d2}
... bark => {d1, d2}
... walk => {b1, g2, d1}
... chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}
... see => {(b1, g1), (b2, d2), (g1, b1),(d2, b1), (g2, n)}
... in => {(b1, n), (b2, n), (d2, n)}
... with => {(b1, g1), (g1, b1), (d1, b1), (b1, d1)}
... """
>>> val = Valuation.fromstring(v)

>>> print(val) # doctest: +SKIP
{'bark': set([('d1',), ('d2',)]),
 'boy': set([('b1',), ('b2',)]),
 'chase': set([('b1', 'g1'), ('g2', 'd2'), ('g1', 'd1'), ('b2', 'g1')]),
 'dog': set([('d1',), ('d2',)]),
 'fido': 'd1',
 'girl': set([('g2',), ('g1',)]),
 'in': set([('d2', 'n'), ('b1', 'n'), ('b2', 'n')]),
 'john': 'b1',
 'mary': 'g1',
 'noosa': 'n',
 'see': set([('b1', 'g1'), ('b2', 'd2'), ('d2', 'b1'), ('g2', 'n'), ('g1', 'b1')]),
 'suzie': 'g2',
 'tess': 'd2',
 'walk': set([('d1',), ('b1',), ('g2',)]),
 'with': set([('b1', 'g1'), ('d1', 'b1'), ('b1', 'd1'), ('g1', 'b1')])}


Unit tests for function argument application in a Model
-------------------------------------------------------

>>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),\
... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), ('dog', set(['d1'])),
... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')])),
... ('kiss', null_binary_rel)]
>>> val = Valuation(v)
>>> dom = val.domain
>>> m = Model(dom, val)
>>> g = Assignment(dom)
>>> sorted(val['boy'])
[('b1',), ('b2',)]
>>> ('b1',) in val['boy']
True
>>> ('g1',) in val['boy']
False
>>> ('foo',) in val['boy']
False
>>> ('b1', 'g1') in val['love']
True
>>> ('b1', 'b1') in val['kiss']
False
>>> sorted(val.domain)
|
219 |
+
['b1', 'b2', 'd1', 'g1', 'g2']
|
220 |
+
|
221 |
+
|
222 |
+
Model Tests
|
223 |
+
===========
|
224 |
+
|
225 |
+
Extension of Lambda expressions
|
226 |
+
|
227 |
+
>>> v0 = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),\
|
228 |
+
... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
|
229 |
+
... ('dog', set(['d1'])),
|
230 |
+
... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
|
231 |
+
|
232 |
+
>>> val0 = Valuation(v0)
|
233 |
+
>>> dom0 = val0.domain
|
234 |
+
>>> m0 = Model(dom0, val0)
|
235 |
+
>>> g0 = Assignment(dom0)
|
236 |
+
|
237 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)', g0) == {'g2': {'g2': False, 'b2': False, 'b1': True, 'g1': False, 'd1': False}, 'b2': {'g2': True, 'b2': False, 'b1': False, 'g1': False, 'd1': False}, 'b1': {'g2': False, 'b2': False, 'b1': False, 'g1': True, 'd1': False}, 'g1': {'g2': False, 'b2': False, 'b1': True, 'g1': False, 'd1': False}, 'd1': {'g2': False, 'b2': False, 'b1': False, 'g1': False, 'd1': False}})
|
238 |
+
True
|
239 |
+
>>> print(m0.evaluate(r'\x. dog(x) (adam)', g0))
|
240 |
+
False
|
241 |
+
>>> print(m0.evaluate(r'\x. (dog(x) | boy(x)) (adam)', g0))
|
242 |
+
True
|
243 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)(fido)', g0) == {'g2': False, 'b2': False, 'b1': False, 'g1': False, 'd1': False})
|
244 |
+
True
|
245 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)(adam)', g0) == {'g2': False, 'b2': False, 'b1': False, 'g1': True, 'd1': False})
|
246 |
+
True
|
247 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)(betty)', g0) == {'g2': False, 'b2': False, 'b1': True, 'g1': False, 'd1': False})
|
248 |
+
True
|
249 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)(betty)(adam)', g0))
|
250 |
+
True
|
251 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)(betty, adam)', g0))
|
252 |
+
True
|
253 |
+
>>> print(m0.evaluate(r'\y. \x. love(x, y)(fido)(adam)', g0))
|
254 |
+
False
|
255 |
+
>>> print(m0.evaluate(r'\y. \x. love(x, y)(betty, adam)', g0))
|
256 |
+
True
|
257 |
+
>>> print(m0.evaluate(r'\x. exists y. love(x, y)', g0) == {'g2': True, 'b2': True, 'b1': True, 'g1': True, 'd1': False})
|
258 |
+
True
|
259 |
+
>>> print(m0.evaluate(r'\z. adam', g0) == {'g2': 'b1', 'b2': 'b1', 'b1': 'b1', 'g1': 'b1', 'd1': 'b1'})
|
260 |
+
True
|
261 |
+
>>> print(m0.evaluate(r'\z. love(x, y)', g0) == {'g2': False, 'b2': False, 'b1': False, 'g1': False, 'd1': False})
|
262 |
+
True
|
263 |
+
|
264 |
+
|
265 |
+
Propositional Model Test
|
266 |
+
------------------------
|
267 |
+
|
268 |
+
>>> tests = [
|
269 |
+
... ('P & Q', True),
|
270 |
+
... ('P & R', False),
|
271 |
+
... ('- P', False),
|
272 |
+
... ('- R', True),
|
273 |
+
... ('- - P', True),
|
274 |
+
... ('- (P & R)', True),
|
275 |
+
... ('P | R', True),
|
276 |
+
... ('R | P', True),
|
277 |
+
... ('R | R', False),
|
278 |
+
... ('- P | R', False),
|
279 |
+
... ('P | - P', True),
|
280 |
+
... ('P -> Q', True),
|
281 |
+
... ('P -> R', False),
|
282 |
+
... ('R -> P', True),
|
283 |
+
... ('P <-> P', True),
|
284 |
+
... ('R <-> R', True),
|
285 |
+
... ('P <-> R', False),
|
286 |
+
... ]
|
287 |
+
>>> val1 = Valuation([('P', True), ('Q', True), ('R', False)])
|
288 |
+
>>> dom = set([])
|
289 |
+
>>> m = Model(dom, val1)
|
290 |
+
>>> g = Assignment(dom)
|
291 |
+
>>> for (sent, testvalue) in tests:
|
292 |
+
... semvalue = m.evaluate(sent, g)
|
293 |
+
... if semvalue == testvalue:
|
294 |
+
... print('*', end=' ')
|
295 |
+
* * * * * * * * * * * * * * * * *
|
296 |
+
|
297 |
+
|
298 |
+
Test of i Function
|
299 |
+
------------------
|
300 |
+
|
301 |
+
>>> from nltk.sem import Expression
|
302 |
+
>>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
|
303 |
+
... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), ('dog', set(['d1'])),
|
304 |
+
... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
|
305 |
+
>>> val = Valuation(v)
|
306 |
+
>>> dom = val.domain
|
307 |
+
>>> m = Model(dom, val)
|
308 |
+
>>> g = Assignment(dom, [('x', 'b1'), ('y', 'g2')])
|
309 |
+
>>> exprs = ['adam', 'girl', 'love', 'walks', 'x', 'y', 'z']
|
310 |
+
>>> parsed_exprs = [Expression.fromstring(e) for e in exprs]
|
311 |
+
>>> sorted_set = lambda x: sorted(x) if isinstance(x, set) else x
|
312 |
+
>>> for parsed in parsed_exprs:
|
313 |
+
... try:
|
314 |
+
... print("'%s' gets value %s" % (parsed, sorted_set(m.i(parsed, g))))
|
315 |
+
... except Undefined:
|
316 |
+
... print("'%s' is Undefined" % parsed)
|
317 |
+
'adam' gets value b1
|
318 |
+
'girl' gets value [('g1',), ('g2',)]
|
319 |
+
'love' gets value [('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]
|
320 |
+
'walks' is Undefined
|
321 |
+
'x' gets value b1
|
322 |
+
'y' gets value g2
|
323 |
+
'z' is Undefined
|
324 |
+
|
325 |
+
Test for formulas in Model
|
326 |
+
--------------------------
|
327 |
+
|
328 |
+
>>> tests = [
|
329 |
+
... ('love(adam, betty)', True),
|
330 |
+
... ('love(adam, sue)', 'Undefined'),
|
331 |
+
... ('dog(fido)', True),
|
332 |
+
... ('- dog(fido)', False),
|
333 |
+
... ('- - dog(fido)', True),
|
334 |
+
... ('- dog(sue)', 'Undefined'),
|
335 |
+
... ('dog(fido) & boy(adam)', True),
|
336 |
+
... ('- (dog(fido) & boy(adam))', False),
|
337 |
+
... ('- dog(fido) & boy(adam)', False),
|
338 |
+
... ('dog(fido) | boy(adam)', True),
|
339 |
+
... ('- (dog(fido) | boy(adam))', False),
|
340 |
+
... ('- dog(fido) | boy(adam)', True),
|
341 |
+
... ('- dog(fido) | - boy(adam)', False),
|
342 |
+
... ('dog(fido) -> boy(adam)', True),
|
343 |
+
... ('- (dog(fido) -> boy(adam))', False),
|
344 |
+
... ('- dog(fido) -> boy(adam)', True),
|
345 |
+
... ('exists x . love(adam, x)', True),
|
346 |
+
... ('all x . love(adam, x)', False),
|
347 |
+
... ('fido = fido', True),
|
348 |
+
... ('exists x . all y. love(x, y)', False),
|
349 |
+
... ('exists x . (x = fido)', True),
|
350 |
+
... ('all x . (dog(x) | - dog(x))', True),
|
351 |
+
... ('adam = mia', 'Undefined'),
|
352 |
+
... ('\\x. (boy(x) | girl(x))', {'g2': True, 'b2': True, 'b1': True, 'g1': True, 'd1': False}),
|
353 |
+
... ('\\x. exists y. (boy(x) & love(x, y))', {'g2': False, 'b2': True, 'b1': True, 'g1': False, 'd1': False}),
|
354 |
+
... ('exists z1. boy(z1)', True),
|
355 |
+
... ('exists x. (boy(x) & - (x = adam))', True),
|
356 |
+
... ('exists x. (boy(x) & all y. love(y, x))', False),
|
357 |
+
... ('all x. (boy(x) | girl(x))', False),
|
358 |
+
... ('all x. (girl(x) -> exists y. boy(y) & love(x, y))', False),
|
359 |
+
... ('exists x. (boy(x) & all y. (girl(y) -> love(y, x)))', True),
|
360 |
+
... ('exists x. (boy(x) & all y. (girl(y) -> love(x, y)))', False),
|
361 |
+
... ('all x. (dog(x) -> - girl(x))', True),
|
362 |
+
... ('exists x. exists y. (love(x, y) & love(x, y))', True),
|
363 |
+
... ]
|
364 |
+
>>> for (sent, testvalue) in tests:
|
365 |
+
... semvalue = m.evaluate(sent, g)
|
366 |
+
... if semvalue == testvalue:
|
367 |
+
... print('*', end=' ')
|
368 |
+
... else:
|
369 |
+
... print(sent, semvalue)
|
370 |
+
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
|
371 |
+
|
372 |
+
|
373 |
+
|
374 |
+
Satisfier Tests
|
375 |
+
---------------
|
376 |
+
|
377 |
+
>>> formulas = [
|
378 |
+
... 'boy(x)',
|
379 |
+
... '(x = x)',
|
380 |
+
... '(boy(x) | girl(x))',
|
381 |
+
... '(boy(x) & girl(x))',
|
382 |
+
... 'love(adam, x)',
|
383 |
+
... 'love(x, adam)',
|
384 |
+
... '- (x = adam)',
|
385 |
+
... 'exists z22. love(x, z22)',
|
386 |
+
... 'exists y. love(y, x)',
|
387 |
+
... 'all y. (girl(y) -> love(x, y))',
|
388 |
+
... 'all y. (girl(y) -> love(y, x))',
|
389 |
+
... 'all y. (girl(y) -> (boy(x) & love(y, x)))',
|
390 |
+
... 'boy(x) & all y. (girl(y) -> love(x, y))',
|
391 |
+
... 'boy(x) & all y. (girl(y) -> love(y, x))',
|
392 |
+
... 'boy(x) & exists y. (girl(y) & love(y, x))',
|
393 |
+
... 'girl(x) -> dog(x)',
|
394 |
+
... 'all y. (dog(y) -> (x = y))',
|
395 |
+
... '- exists y. love(y, x)',
|
396 |
+
... 'exists y. (love(adam, y) & love(y, x))'
|
397 |
+
... ]
|
398 |
+
>>> g.purge()
|
399 |
+
>>> g.add('x', 'b1')
|
400 |
+
{'x': 'b1'}
|
401 |
+
>>> for f in formulas:
|
402 |
+
... try:
|
403 |
+
... print("'%s' gets value: %s" % (f, m.evaluate(f, g)))
|
404 |
+
... except Undefined:
|
405 |
+
... print("'%s' is Undefined" % f)
|
406 |
+
'boy(x)' gets value: True
|
407 |
+
'(x = x)' gets value: True
|
408 |
+
'(boy(x) | girl(x))' gets value: True
|
409 |
+
'(boy(x) & girl(x))' gets value: False
|
410 |
+
'love(adam, x)' gets value: False
|
411 |
+
'love(x, adam)' gets value: False
|
412 |
+
'- (x = adam)' gets value: False
|
413 |
+
'exists z22. love(x, z22)' gets value: True
|
414 |
+
'exists y. love(y, x)' gets value: True
|
415 |
+
'all y. (girl(y) -> love(x, y))' gets value: False
|
416 |
+
'all y. (girl(y) -> love(y, x))' gets value: True
|
417 |
+
'all y. (girl(y) -> (boy(x) & love(y, x)))' gets value: True
|
418 |
+
'boy(x) & all y. (girl(y) -> love(x, y))' gets value: False
|
419 |
+
'boy(x) & all y. (girl(y) -> love(y, x))' gets value: True
|
420 |
+
'boy(x) & exists y. (girl(y) & love(y, x))' gets value: True
|
421 |
+
'girl(x) -> dog(x)' gets value: True
|
422 |
+
'all y. (dog(y) -> (x = y))' gets value: False
|
423 |
+
'- exists y. love(y, x)' gets value: False
|
424 |
+
'exists y. (love(adam, y) & love(y, x))' gets value: True
|
425 |
+
|
426 |
+
>>> from nltk.sem import Expression
|
427 |
+
>>> for fmla in formulas:
|
428 |
+
... p = Expression.fromstring(fmla)
|
429 |
+
... g.purge()
|
430 |
+
... print("Satisfiers of '%s':\n\t%s" % (p, sorted(m.satisfiers(p, 'x', g))))
|
431 |
+
Satisfiers of 'boy(x)':
|
432 |
+
['b1', 'b2']
|
433 |
+
Satisfiers of '(x = x)':
|
434 |
+
['b1', 'b2', 'd1', 'g1', 'g2']
|
435 |
+
Satisfiers of '(boy(x) | girl(x))':
|
436 |
+
['b1', 'b2', 'g1', 'g2']
|
437 |
+
Satisfiers of '(boy(x) & girl(x))':
|
438 |
+
[]
|
439 |
+
Satisfiers of 'love(adam,x)':
|
440 |
+
['g1']
|
441 |
+
Satisfiers of 'love(x,adam)':
|
442 |
+
['g1', 'g2']
|
443 |
+
Satisfiers of '-(x = adam)':
|
444 |
+
['b2', 'd1', 'g1', 'g2']
|
445 |
+
Satisfiers of 'exists z22.love(x,z22)':
|
446 |
+
['b1', 'b2', 'g1', 'g2']
|
447 |
+
Satisfiers of 'exists y.love(y,x)':
|
448 |
+
['b1', 'g1', 'g2']
|
449 |
+
Satisfiers of 'all y.(girl(y) -> love(x,y))':
|
450 |
+
[]
|
451 |
+
Satisfiers of 'all y.(girl(y) -> love(y,x))':
|
452 |
+
['b1']
|
453 |
+
Satisfiers of 'all y.(girl(y) -> (boy(x) & love(y,x)))':
|
454 |
+
['b1']
|
455 |
+
Satisfiers of '(boy(x) & all y.(girl(y) -> love(x,y)))':
|
456 |
+
[]
|
457 |
+
Satisfiers of '(boy(x) & all y.(girl(y) -> love(y,x)))':
|
458 |
+
['b1']
|
459 |
+
Satisfiers of '(boy(x) & exists y.(girl(y) & love(y,x)))':
|
460 |
+
['b1']
|
461 |
+
Satisfiers of '(girl(x) -> dog(x))':
|
462 |
+
['b1', 'b2', 'd1']
|
463 |
+
Satisfiers of 'all y.(dog(y) -> (x = y))':
|
464 |
+
['d1']
|
465 |
+
Satisfiers of '-exists y.love(y,x)':
|
466 |
+
['b2', 'd1']
|
467 |
+
Satisfiers of 'exists y.(love(adam,y) & love(y,x))':
|
468 |
+
['b1']
|
469 |
+
|
470 |
+
|
471 |
+
Tests based on the Blackburn & Bos testsuite
|
472 |
+
--------------------------------------------
|
473 |
+
|
474 |
+
>>> v1 = [('jules', 'd1'), ('vincent', 'd2'), ('pumpkin', 'd3'),
|
475 |
+
... ('honey_bunny', 'd4'), ('yolanda', 'd5'),
|
476 |
+
... ('customer', set(['d1', 'd2'])),
|
477 |
+
... ('robber', set(['d3', 'd4'])),
|
478 |
+
... ('love', set([('d3', 'd4')]))]
|
479 |
+
>>> val1 = Valuation(v1)
|
480 |
+
>>> dom1 = val1.domain
|
481 |
+
>>> m1 = Model(dom1, val1)
|
482 |
+
>>> g1 = Assignment(dom1)
|
483 |
+
|
484 |
+
>>> v2 = [('jules', 'd1'), ('vincent', 'd2'), ('pumpkin', 'd3'),
|
485 |
+
... ('honey_bunny', 'd4'), ('yolanda', 'd4'),
|
486 |
+
... ('customer', set(['d1', 'd2', 'd5', 'd6'])),
|
487 |
+
... ('robber', set(['d3', 'd4'])),
|
488 |
+
... ('love', set([(None, None)]))]
|
489 |
+
>>> val2 = Valuation(v2)
|
490 |
+
>>> dom2 = set(['d1', 'd2', 'd3', 'd4', 'd5', 'd6'])
|
491 |
+
>>> m2 = Model(dom2, val2)
|
492 |
+
>>> g2 = Assignment(dom2)
|
493 |
+
>>> g21 = Assignment(dom2)
|
494 |
+
>>> g21.add('y', 'd3')
|
495 |
+
{'y': 'd3'}
|
496 |
+
|
497 |
+
>>> v3 = [('mia', 'd1'), ('jody', 'd2'), ('jules', 'd3'),
|
498 |
+
... ('vincent', 'd4'),
|
499 |
+
... ('woman', set(['d1', 'd2'])), ('man', set(['d3', 'd4'])),
|
500 |
+
... ('joke', set(['d5', 'd6'])), ('episode', set(['d7', 'd8'])),
|
501 |
+
... ('in', set([('d5', 'd7'), ('d5', 'd8')])),
|
502 |
+
... ('tell', set([('d1', 'd5'), ('d2', 'd6')]))]
|
503 |
+
>>> val3 = Valuation(v3)
|
504 |
+
>>> dom3 = set(['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8'])
|
505 |
+
>>> m3 = Model(dom3, val3)
|
506 |
+
>>> g3 = Assignment(dom3)
|
507 |
+
|
508 |
+
>>> tests = [
|
509 |
+
... ('exists x. robber(x)', m1, g1, True),
|
510 |
+
... ('exists x. exists y. love(y, x)', m1, g1, True),
|
511 |
+
... ('exists x0. exists x1. love(x1, x0)', m2, g2, False),
|
512 |
+
... ('all x. all y. love(y, x)', m2, g2, False),
|
513 |
+
... ('- (all x. all y. love(y, x))', m2, g2, True),
|
514 |
+
... ('all x. all y. - love(y, x)', m2, g2, True),
|
515 |
+
... ('yolanda = honey_bunny', m2, g2, True),
|
516 |
+
... ('mia = honey_bunny', m2, g2, 'Undefined'),
|
517 |
+
... ('- (yolanda = honey_bunny)', m2, g2, False),
|
518 |
+
... ('- (mia = honey_bunny)', m2, g2, 'Undefined'),
|
519 |
+
... ('all x. (robber(x) | customer(x))', m2, g2, True),
|
520 |
+
... ('- (all x. (robber(x) | customer(x)))', m2, g2, False),
|
521 |
+
... ('(robber(x) | customer(x))', m2, g2, 'Undefined'),
|
522 |
+
... ('(robber(y) | customer(y))', m2, g21, True),
|
523 |
+
... ('exists x. (man(x) & exists x. woman(x))', m3, g3, True),
|
524 |
+
... ('exists x. (man(x) & exists x. woman(x))', m3, g3, True),
|
525 |
+
... ('- exists x. woman(x)', m3, g3, False),
|
526 |
+
... ('exists x. (tasty(x) & burger(x))', m3, g3, 'Undefined'),
|
527 |
+
... ('- exists x. (tasty(x) & burger(x))', m3, g3, 'Undefined'),
|
528 |
+
... ('exists x. (man(x) & - exists y. woman(y))', m3, g3, False),
|
529 |
+
... ('exists x. (man(x) & - exists x. woman(x))', m3, g3, False),
|
530 |
+
... ('exists x. (woman(x) & - exists x. customer(x))', m2, g2, 'Undefined'),
|
531 |
+
... ]
|
532 |
+
|
533 |
+
>>> for item in tests:
|
534 |
+
... sentence, model, g, testvalue = item
|
535 |
+
... semvalue = model.evaluate(sentence, g)
|
536 |
+
... if semvalue == testvalue:
|
537 |
+
... print('*', end=' ')
|
538 |
+
... g.purge()
|
539 |
+
* * * * * * * * * * * * * * * * * * * * * *
|
540 |
+
|
541 |
+
|
542 |
+
Tests for mapping from syntax to semantics
|
543 |
+
------------------------------------------
|
544 |
+
|
545 |
+
Load a valuation from a file.
|
546 |
+
|
547 |
+
>>> import nltk.data
|
548 |
+
>>> from nltk.sem.util import parse_sents
|
549 |
+
>>> val = nltk.data.load('grammars/sample_grammars/valuation1.val')
|
550 |
+
>>> dom = val.domain
|
551 |
+
>>> m = Model(dom, val)
|
552 |
+
>>> g = Assignment(dom)
|
553 |
+
>>> gramfile = 'grammars/sample_grammars/sem2.fcfg'
|
554 |
+
>>> inputs = ['John sees a girl', 'every dog barks']
|
555 |
+
>>> parses = parse_sents(inputs, gramfile)
|
556 |
+
>>> for sent, trees in zip(inputs, parses):
|
557 |
+
... print()
|
558 |
+
... print("Sentence: %s" % sent)
|
559 |
+
... for tree in trees:
|
560 |
+
... print("Parse:\n %s" %tree)
|
561 |
+
... print("Semantics: %s" % root_semrep(tree))
|
562 |
+
<BLANKLINE>
|
563 |
+
Sentence: John sees a girl
|
564 |
+
Parse:
|
565 |
+
(S[SEM=<exists x.(girl(x) & see(john,x))>]
|
566 |
+
(NP[-LOC, NUM='sg', SEM=<\P.P(john)>]
|
567 |
+
(PropN[-LOC, NUM='sg', SEM=<\P.P(john)>] John))
|
568 |
+
(VP[NUM='sg', SEM=<\y.exists x.(girl(x) & see(y,x))>]
|
569 |
+
(TV[NUM='sg', SEM=<\X y.X(\x.see(y,x))>, TNS='pres'] sees)
|
570 |
+
(NP[NUM='sg', SEM=<\Q.exists x.(girl(x) & Q(x))>]
|
571 |
+
(Det[NUM='sg', SEM=<\P Q.exists x.(P(x) & Q(x))>] a)
|
572 |
+
(Nom[NUM='sg', SEM=<\x.girl(x)>]
|
573 |
+
(N[NUM='sg', SEM=<\x.girl(x)>] girl)))))
|
574 |
+
Semantics: exists x.(girl(x) & see(john,x))
|
575 |
+
<BLANKLINE>
|
576 |
+
Sentence: every dog barks
|
577 |
+
Parse:
|
578 |
+
(S[SEM=<all x.(dog(x) -> bark(x))>]
|
579 |
+
(NP[NUM='sg', SEM=<\Q.all x.(dog(x) -> Q(x))>]
|
580 |
+
(Det[NUM='sg', SEM=<\P Q.all x.(P(x) -> Q(x))>] every)
|
581 |
+
(Nom[NUM='sg', SEM=<\x.dog(x)>]
|
582 |
+
(N[NUM='sg', SEM=<\x.dog(x)>] dog)))
|
583 |
+
(VP[NUM='sg', SEM=<\x.bark(x)>]
|
584 |
+
(IV[NUM='sg', SEM=<\x.bark(x)>, TNS='pres'] barks)))
|
585 |
+
Semantics: all x.(dog(x) -> bark(x))
|
586 |
+
|
587 |
+
>>> sent = "every dog barks"
|
588 |
+
>>> result = nltk.sem.util.interpret_sents([sent], gramfile)[0]
|
589 |
+
>>> for (syntree, semrep) in result:
|
590 |
+
... print(syntree)
|
591 |
+
... print()
|
592 |
+
... print(semrep)
|
593 |
+
(S[SEM=<all x.(dog(x) -> bark(x))>]
|
594 |
+
(NP[NUM='sg', SEM=<\Q.all x.(dog(x) -> Q(x))>]
|
595 |
+
(Det[NUM='sg', SEM=<\P Q.all x.(P(x) -> Q(x))>] every)
|
596 |
+
(Nom[NUM='sg', SEM=<\x.dog(x)>]
|
597 |
+
(N[NUM='sg', SEM=<\x.dog(x)>] dog)))
|
598 |
+
(VP[NUM='sg', SEM=<\x.bark(x)>]
|
599 |
+
(IV[NUM='sg', SEM=<\x.bark(x)>, TNS='pres'] barks)))
|
600 |
+
<BLANKLINE>
|
601 |
+
all x.(dog(x) -> bark(x))
|
602 |
+

    >>> result = nltk.sem.util.evaluate_sents([sent], gramfile, m, g)[0]
    >>> for (syntree, semrep, value) in result:
    ...     print(syntree)
    ...     print()
    ...     print(semrep)
    ...     print()
    ...     print(value)
    (S[SEM=<all x.(dog(x) -> bark(x))>]
      (NP[NUM='sg', SEM=<\Q.all x.(dog(x) -> Q(x))>]
        (Det[NUM='sg', SEM=<\P Q.all x.(P(x) -> Q(x))>] every)
        (Nom[NUM='sg', SEM=<\x.dog(x)>]
          (N[NUM='sg', SEM=<\x.dog(x)>] dog)))
      (VP[NUM='sg', SEM=<\x.bark(x)>]
        (IV[NUM='sg', SEM=<\x.bark(x)>, TNS='pres'] barks)))
    <BLANKLINE>
    all x.(dog(x) -> bark(x))
    <BLANKLINE>
    True

    >>> sents = ['Mary walks', 'John sees a dog']
    >>> results = nltk.sem.util.interpret_sents(sents, 'grammars/sample_grammars/sem2.fcfg')
    >>> for result in results:
    ...     for (synrep, semrep) in result:
    ...         print(synrep)
    (S[SEM=<walk(mary)>]
      (NP[-LOC, NUM='sg', SEM=<\P.P(mary)>]
        (PropN[-LOC, NUM='sg', SEM=<\P.P(mary)>] Mary))
      (VP[NUM='sg', SEM=<\x.walk(x)>]
        (IV[NUM='sg', SEM=<\x.walk(x)>, TNS='pres'] walks)))
    (S[SEM=<exists x.(dog(x) & see(john,x))>]
      (NP[-LOC, NUM='sg', SEM=<\P.P(john)>]
        (PropN[-LOC, NUM='sg', SEM=<\P.P(john)>] John))
      (VP[NUM='sg', SEM=<\y.exists x.(dog(x) & see(y,x))>]
        (TV[NUM='sg', SEM=<\X y.X(\x.see(y,x))>, TNS='pres'] sees)
        (NP[NUM='sg', SEM=<\Q.exists x.(dog(x) & Q(x))>]
          (Det[NUM='sg', SEM=<\P Q.exists x.(P(x) & Q(x))>] a)
          (Nom[NUM='sg', SEM=<\x.dog(x)>]
            (N[NUM='sg', SEM=<\x.dog(x)>] dog)))))

Cooper Storage
--------------

    >>> from nltk.sem import cooper_storage as cs
    >>> sentence = 'every girl chases a dog'
    >>> trees = cs.parse_with_bindops(sentence, grammar='grammars/book_grammars/storage.fcfg')
    >>> semrep = trees[0].label()['SEM']
    >>> cs_semrep = cs.CooperStore(semrep)
    >>> print(cs_semrep.core)
    chase(z2,z4)
    >>> for bo in cs_semrep.store:
    ...     print(bo)
    bo(\P.all x.(girl(x) -> P(x)),z2)
    bo(\P.exists x.(dog(x) & P(x)),z4)
    >>> cs_semrep.s_retrieve(trace=True)
    Permutation 1
       (\P.all x.(girl(x) -> P(x)))(\z2.chase(z2,z4))
       (\P.exists x.(dog(x) & P(x)))(\z4.all x.(girl(x) -> chase(x,z4)))
    Permutation 2
       (\P.exists x.(dog(x) & P(x)))(\z4.chase(z2,z4))
       (\P.all x.(girl(x) -> P(x)))(\z2.exists x.(dog(x) & chase(z2,x)))

    >>> for reading in cs_semrep.readings:
    ...     print(reading)
    exists x.(dog(x) & all z3.(girl(z3) -> chase(z3,x)))
    all x.(girl(x) -> exists z4.(dog(z4) & chase(x,z4)))
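
The same pipeline applies to any sentence the storage grammar covers; a quick
sketch (the example sentence is our own choice, and output is omitted since
the readings depend on the sentence):

    >>> trees = cs.parse_with_bindops('every dog chases a girl', grammar='grammars/book_grammars/storage.fcfg')  # doctest: +SKIP
    >>> store = cs.CooperStore(trees[0].label()['SEM'])  # doctest: +SKIP
    >>> store.s_retrieve()  # doctest: +SKIP
    >>> for reading in store.readings:  # doctest: +SKIP
    ...     print(reading)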
lib/python3.10/site-packages/nltk/test/sentiment.doctest
ADDED
@@ -0,0 +1,236 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

===================
Sentiment Analysis
===================

    >>> from nltk.classify import NaiveBayesClassifier
    >>> from nltk.corpus import subjectivity
    >>> from nltk.sentiment import SentimentAnalyzer
    >>> from nltk.sentiment.util import *

    >>> n_instances = 100
    >>> subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]]
    >>> obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]]
    >>> len(subj_docs), len(obj_docs)
    (100, 100)

Each document is represented by a tuple (sentence, label). The sentence is
tokenized, so it is represented by a list of strings:

    >>> subj_docs[0]
    (['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one',
    'thing', 'is', 'a', 'small', 'gem', '.'], 'subj')

We split the subjective and objective instances separately, to keep a balanced
and uniform class distribution in both the train and test sets.

    >>> train_subj_docs = subj_docs[:80]
    >>> test_subj_docs = subj_docs[80:100]
    >>> train_obj_docs = obj_docs[:80]
    >>> test_obj_docs = obj_docs[80:100]
    >>> training_docs = train_subj_docs+train_obj_docs
    >>> testing_docs = test_subj_docs+test_obj_docs

    >>> sentim_analyzer = SentimentAnalyzer()
    >>> all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])

We use simple unigram word features, handling negation:

    >>> unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    >>> len(unigram_feats)
    83
    >>> sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

We apply features to obtain a feature-value representation of our datasets:

    >>> training_set = sentim_analyzer.apply_features(training_docs)
    >>> test_set = sentim_analyzer.apply_features(testing_docs)

We can now train our classifier on the training set, and subsequently output
the evaluation results:

    >>> trainer = NaiveBayesClassifier.train
    >>> classifier = sentim_analyzer.train(trainer, training_set)
    Training classifier
    >>> for key,value in sorted(sentim_analyzer.evaluate(test_set).items()):
    ...     print('{0}: {1}'.format(key, value))
    Evaluating NaiveBayesClassifier results...
    Accuracy: 0.8
    F-measure [obj]: 0.8
    F-measure [subj]: 0.8
    Precision [obj]: 0.8
    Precision [subj]: 0.8
    Recall [obj]: 0.8
    Recall [subj]: 0.8

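Once trained, the analyzer can also be applied to fresh tokenized text via
``SentimentAnalyzer.classify()``, which runs the registered feature extractors
before consulting the trained classifier. A minimal sketch (the sentence is
invented here, so no prediction is asserted):

    >>> sentim_analyzer.classify("this movie was a small gem".split())  # doctest: +SKIP
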
Vader
------

    >>> from nltk.sentiment.vader import SentimentIntensityAnalyzer
    >>> sentences = ["VADER is smart, handsome, and funny.", # positive sentence example
    ...    "VADER is smart, handsome, and funny!", # punctuation emphasis handled correctly (sentiment intensity adjusted)
    ...    "VADER is very smart, handsome, and funny.",  # booster words handled correctly (sentiment intensity adjusted)
    ...    "VADER is VERY SMART, handsome, and FUNNY.",  # emphasis for ALLCAPS handled
    ...    "VADER is VERY SMART, handsome, and FUNNY!!!",# combination of signals - VADER appropriately adjusts intensity
    ...    "VADER is VERY SMART, really handsome, and INCREDIBLY FUNNY!!!",# booster words & punctuation make this close to ceiling for score
    ...    "The book was good.",         # positive sentence
    ...    "The book was kind of good.", # qualified positive sentence is handled correctly (intensity adjusted)
    ...    "The plot was good, but the characters are uncompelling and the dialog is not great.", # mixed negation sentence
    ...    "A really bad, horrible book.",       # negative sentence with booster words
    ...    "At least it isn't a horrible book.", # negated negative sentence with contraction
    ...    ":) and :D",     # emoticons handled
    ...    "",              # an empty string is correctly handled
    ...    "Today sux",     # negative slang handled
    ...    "Today sux!",    # negative slang with punctuation emphasis handled
    ...    "Today SUX!",    # negative slang with capitalization emphasis
    ...    "Today kinda sux! But I'll get by, lol" # mixed sentiment example with slang and contrastive conjunction "but"
    ... ]
    >>> paragraph = "It was one of the worst movies I've seen, despite good reviews. \
    ... Unbelievably bad acting!! Poor direction. VERY poor production. \
    ... The movie was bad. Very bad movie. VERY bad movie. VERY BAD movie. VERY BAD movie!"

    >>> from nltk import tokenize
    >>> lines_list = tokenize.sent_tokenize(paragraph)
    >>> sentences.extend(lines_list)

    >>> tricky_sentences = [
    ...    "Most automated sentiment analysis tools are shit.",
    ...    "VADER sentiment analysis is the shit.",
    ...    "Sentiment analysis has never been good.",
    ...    "Sentiment analysis with VADER has never been this good.",
    ...    "Warren Beatty has never been so entertaining.",
    ...    "I won't say that the movie is astounding and I wouldn't claim that \
    ... the movie is too banal either.",
    ...    "I like to hate Michael Bay films, but I couldn't fault this one",
    ...    "I like to hate Michael Bay films, BUT I couldn't help but fault this one",
    ...    "It's one thing to watch an Uwe Boll film, but another thing entirely \
    ... to pay for it",
    ...    "The movie was too good",
    ...    "This movie was actually neither that funny, nor super witty.",
    ...    "This movie doesn't care about cleverness, wit or any other kind of \
    ... intelligent humor.",
    ...    "Those who find ugly meanings in beautiful things are corrupt without \
    ... being charming.",
    ...    "There are slow and repetitive parts, BUT it has just enough spice to \
    ... keep it interesting.",
    ...    "The script is not fantastic, but the acting is decent and the cinematography \
    ... is EXCELLENT!",
    ...    "Roger Dodger is one of the most compelling variations on this theme.",
    ...    "Roger Dodger is one of the least compelling variations on this theme.",
    ...    "Roger Dodger is at least compelling as a variation on the theme.",
    ...    "they fall in love with the product",
    ...    "but then it breaks",
    ...    "usually around the time the 90 day warranty expires",
    ...    "the twin towers collapsed today",
    ...    "However, Mr. Carter solemnly argues, his client carried out the kidnapping \
    ... under orders and in the ''least offensive way possible.''"
    ... ]
    >>> sentences.extend(tricky_sentences)
    >>> for sentence in sentences:
    ...     sid = SentimentIntensityAnalyzer()
    ...     print(sentence)
    ...     ss = sid.polarity_scores(sentence)
    ...     for k in sorted(ss):
    ...         print('{0}: {1}, '.format(k, ss[k]), end='')
    ...     print()
    VADER is smart, handsome, and funny.
    compound: 0.8316, neg: 0.0, neu: 0.254, pos: 0.746,
    VADER is smart, handsome, and funny!
    compound: 0.8439, neg: 0.0, neu: 0.248, pos: 0.752,
    VADER is very smart, handsome, and funny.
    compound: 0.8545, neg: 0.0, neu: 0.299, pos: 0.701,
    VADER is VERY SMART, handsome, and FUNNY.
    compound: 0.9227, neg: 0.0, neu: 0.246, pos: 0.754,
    VADER is VERY SMART, handsome, and FUNNY!!!
    compound: 0.9342, neg: 0.0, neu: 0.233, pos: 0.767,
    VADER is VERY SMART, really handsome, and INCREDIBLY FUNNY!!!
    compound: 0.9469, neg: 0.0, neu: 0.294, pos: 0.706,
    The book was good.
    compound: 0.4404, neg: 0.0, neu: 0.508, pos: 0.492,
    The book was kind of good.
    compound: 0.3832, neg: 0.0, neu: 0.657, pos: 0.343,
    The plot was good, but the characters are uncompelling and the dialog is not great.
    compound: -0.7042, neg: 0.327, neu: 0.579, pos: 0.094,
    A really bad, horrible book.
    compound: -0.8211, neg: 0.791, neu: 0.209, pos: 0.0,
    At least it isn't a horrible book.
    compound: 0.431, neg: 0.0, neu: 0.637, pos: 0.363,
    :) and :D
    compound: 0.7925, neg: 0.0, neu: 0.124, pos: 0.876,
    <BLANKLINE>
    compound: 0.0, neg: 0.0, neu: 0.0, pos: 0.0,
    Today sux
    compound: -0.3612, neg: 0.714, neu: 0.286, pos: 0.0,
    Today sux!
    compound: -0.4199, neg: 0.736, neu: 0.264, pos: 0.0,
    Today SUX!
    compound: -0.5461, neg: 0.779, neu: 0.221, pos: 0.0,
    Today kinda sux! But I'll get by, lol
    compound: 0.5249, neg: 0.138, neu: 0.517, pos: 0.344,
    It was one of the worst movies I've seen, despite good reviews.
    compound: -0.7584, neg: 0.394, neu: 0.606, pos: 0.0,
    Unbelievably bad acting!!
    compound: -0.6572, neg: 0.686, neu: 0.314, pos: 0.0,
    Poor direction.
    compound: -0.4767, neg: 0.756, neu: 0.244, pos: 0.0,
    VERY poor production.
    compound: -0.6281, neg: 0.674, neu: 0.326, pos: 0.0,
    The movie was bad.
    compound: -0.5423, neg: 0.538, neu: 0.462, pos: 0.0,
    Very bad movie.
    compound: -0.5849, neg: 0.655, neu: 0.345, pos: 0.0,
    VERY bad movie.
    compound: -0.6732, neg: 0.694, neu: 0.306, pos: 0.0,
    VERY BAD movie.
    compound: -0.7398, neg: 0.724, neu: 0.276, pos: 0.0,
    VERY BAD movie!
    compound: -0.7616, neg: 0.735, neu: 0.265, pos: 0.0,
    Most automated sentiment analysis tools are shit.
    compound: -0.5574, neg: 0.375, neu: 0.625, pos: 0.0,
    VADER sentiment analysis is the shit.
    compound: 0.6124, neg: 0.0, neu: 0.556, pos: 0.444,
    Sentiment analysis has never been good.
    compound: -0.3412, neg: 0.325, neu: 0.675, pos: 0.0,
    Sentiment analysis with VADER has never been this good.
    compound: 0.5228, neg: 0.0, neu: 0.703, pos: 0.297,
    Warren Beatty has never been so entertaining.
    compound: 0.5777, neg: 0.0, neu: 0.616, pos: 0.384,
    I won't say that the movie is astounding and I wouldn't claim that the movie is too banal either.
    compound: 0.4215, neg: 0.0, neu: 0.851, pos: 0.149,
    I like to hate Michael Bay films, but I couldn't fault this one
    compound: 0.3153, neg: 0.157, neu: 0.534, pos: 0.309,
    I like to hate Michael Bay films, BUT I couldn't help but fault this one
    compound: -0.1531, neg: 0.277, neu: 0.477, pos: 0.246,
    It's one thing to watch an Uwe Boll film, but another thing entirely to pay for it
    compound: -0.2541, neg: 0.112, neu: 0.888, pos: 0.0,
    The movie was too good
    compound: 0.4404, neg: 0.0, neu: 0.58, pos: 0.42,
    This movie was actually neither that funny, nor super witty.
    compound: -0.6759, neg: 0.41, neu: 0.59, pos: 0.0,
    This movie doesn't care about cleverness, wit or any other kind of intelligent humor.
    compound: -0.1338, neg: 0.265, neu: 0.497, pos: 0.239,
    Those who find ugly meanings in beautiful things are corrupt without being charming.
    compound: -0.3553, neg: 0.314, neu: 0.493, pos: 0.192,
    There are slow and repetitive parts, BUT it has just enough spice to keep it interesting.
    compound: 0.4678, neg: 0.079, neu: 0.735, pos: 0.186,
    The script is not fantastic, but the acting is decent and the cinematography is EXCELLENT!
    compound: 0.7565, neg: 0.092, neu: 0.607, pos: 0.301,
    Roger Dodger is one of the most compelling variations on this theme.
    compound: 0.2944, neg: 0.0, neu: 0.834, pos: 0.166,
    Roger Dodger is one of the least compelling variations on this theme.
    compound: -0.1695, neg: 0.132, neu: 0.868, pos: 0.0,
    Roger Dodger is at least compelling as a variation on the theme.
    compound: 0.2263, neg: 0.0, neu: 0.84, pos: 0.16,
    they fall in love with the product
    compound: 0.6369, neg: 0.0, neu: 0.588, pos: 0.412,
    but then it breaks
    compound: 0.0, neg: 0.0, neu: 1.0, pos: 0.0,
    usually around the time the 90 day warranty expires
    compound: 0.0, neg: 0.0, neu: 1.0, pos: 0.0,
    the twin towers collapsed today
    compound: -0.2732, neg: 0.344, neu: 0.656, pos: 0.0,
    However, Mr. Carter solemnly argues, his client carried out the kidnapping under orders and in the ''least offensive way possible.''
    compound: -0.5859, neg: 0.23, neu: 0.697, pos: 0.074,
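
The ``compound`` score is convenient for collapsing these raw scores into a
single label; a common convention treats values at or above 0.05 as positive
and at or below -0.05 as negative. A small sketch using one of the sentences
above (whose compound score of 0.5249 puts it in the positive band):

    >>> ss = SentimentIntensityAnalyzer().polarity_scores("Today kinda sux! But I'll get by, lol")  # doctest: +SKIP
    >>> 'positive' if ss['compound'] >= 0.05 else ('negative' if ss['compound'] <= -0.05 else 'neutral')  # doctest: +SKIP
    'positive'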
lib/python3.10/site-packages/nltk/test/tag.doctest
ADDED
@@ -0,0 +1,475 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

Evaluation of Taggers
=====================

Evaluating the standard NLTK PerceptronTagger using Accuracy,
Precision, Recall and F-measure for each of the tags.

    >>> from nltk.tag import PerceptronTagger
    >>> from nltk.corpus import treebank
    >>> tagger = PerceptronTagger()
    >>> gold_data = treebank.tagged_sents()[10:20]
    >>> print(tagger.accuracy(gold_data)) # doctest: +ELLIPSIS
    0.885931...

    >>> print(tagger.evaluate_per_tag(gold_data))
       Tag | Prec.  | Recall | F-measure
    -------+--------+--------+-----------
        '' | 1.0000 | 1.0000 | 1.0000
         , | 1.0000 | 1.0000 | 1.0000
    -NONE- | 0.0000 | 0.0000 | 0.0000
         . | 1.0000 | 1.0000 | 1.0000
         : | 1.0000 | 1.0000 | 1.0000
        CC | 1.0000 | 1.0000 | 1.0000
        CD | 0.7647 | 1.0000 | 0.8667
        DT | 1.0000 | 1.0000 | 1.0000
        IN | 1.0000 | 1.0000 | 1.0000
        JJ | 0.5882 | 0.8333 | 0.6897
       JJR | 1.0000 | 1.0000 | 1.0000
       JJS | 1.0000 | 1.0000 | 1.0000
        NN | 0.7647 | 0.9630 | 0.8525
       NNP | 0.8929 | 1.0000 | 0.9434
       NNS | 1.0000 | 1.0000 | 1.0000
       POS | 1.0000 | 1.0000 | 1.0000
       PRP | 1.0000 | 1.0000 | 1.0000
        RB | 0.8000 | 1.0000 | 0.8889
       RBR | 0.0000 | 0.0000 | 0.0000
        TO | 1.0000 | 1.0000 | 1.0000
        VB | 1.0000 | 1.0000 | 1.0000
       VBD | 0.8571 | 0.9231 | 0.8889
       VBG | 1.0000 | 1.0000 | 1.0000
       VBN | 0.8333 | 0.5556 | 0.6667
       VBP | 0.5714 | 0.8000 | 0.6667
       VBZ | 1.0000 | 1.0000 | 1.0000
        WP | 1.0000 | 1.0000 | 1.0000
        `` | 1.0000 | 1.0000 | 1.0000
    <BLANKLINE>

List only the 10 most common tags:

    >>> print(tagger.evaluate_per_tag(gold_data, truncate=10, sort_by_count=True))
       Tag | Prec.  | Recall | F-measure
    -------+--------+--------+-----------
        IN | 1.0000 | 1.0000 | 1.0000
        DT | 1.0000 | 1.0000 | 1.0000
        NN | 0.7647 | 0.9630 | 0.8525
       NNP | 0.8929 | 1.0000 | 0.9434
       NNS | 1.0000 | 1.0000 | 1.0000
    -NONE- | 0.0000 | 0.0000 | 0.0000
        CD | 0.7647 | 1.0000 | 0.8667
       VBD | 0.8571 | 0.9231 | 0.8889
        JJ | 0.5882 | 0.8333 | 0.6897
         , | 1.0000 | 1.0000 | 1.0000
    <BLANKLINE>

Similarly, we can display the confusion matrix for this tagger.

    >>> print(tagger.confusion(gold_data))
           | - |
           | N |
           | O |
           | N J J N N P P R V V V V V |
           | ' E C C D I J J J N N N O R R B T V B B B B B W ` |
           | ' , - . : C D T N J R S N P S S P B R O B D G N P Z P ` |
    -------+-------------------------------------------------------------------------------------+
        '' | <3> . . . . . . . . . . . . . . . . . . . . . . . . . . . |
         , | .<11> . . . . . . . . . . . . . . . . . . . . . . . . . . |
    -NONE- | . . <.> . . . 4 . . 4 . . 7 2 . . . 1 . . . . . . 3 . . . |
         . | . . .<10> . . . . . . . . . . . . . . . . . . . . . . . . |
         : | . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . |
        CC | . . . . . <5> . . . . . . . . . . . . . . . . . . . . . . |
        CD | . . . . . .<13> . . . . . . . . . . . . . . . . . . . . . |
        DT | . . . . . . .<28> . . . . . . . . . . . . . . . . . . . . |
        IN | . . . . . . . .<34> . . . . . . . . . . . . . . . . . . . |
        JJ | . . . . . . . . .<10> . . . 1 . . . . 1 . . . . . . . . . |
       JJR | . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . |
       JJS | . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . |
        NN | . . . . . . . . . 1 . .<26> . . . . . . . . . . . . . . . |
       NNP | . . . . . . . . . . . . .<25> . . . . . . . . . . . . . . |
       NNS | . . . . . . . . . . . . . .<22> . . . . . . . . . . . . . |
       POS | . . . . . . . . . . . . . . . <1> . . . . . . . . . . . . |
       PRP | . . . . . . . . . . . . . . . . <3> . . . . . . . . . . . |
        RB | . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . |
       RBR | . . . . . . . . . . . . . . . . . . <.> . . . . . . . . . |
        TO | . . . . . . . . . . . . . . . . . . . <2> . . . . . . . . |
        VB | . . . . . . . . . . . . . . . . . . . . <1> . . . . . . . |
       VBD | . . . . . . . . . . . . . . . . . . . . .<12> . 1 . . . . |
       VBG | . . . . . . . . . . . . . . . . . . . . . . <3> . . . . . |
       VBN | . . . . . . . . . 2 . . . . . . . . . . . 2 . <5> . . . . |
       VBP | . . . . . . . . . . . . 1 . . . . . . . . . . . <4> . . . |
       VBZ | . . . . . . . . . . . . . . . . . . . . . . . . . <2> . . |
        WP | . . . . . . . . . . . . . . . . . . . . . . . . . . <3> . |
        `` | . . . . . . . . . . . . . . . . . . . . . . . . . . . <3>|
    -------+-------------------------------------------------------------------------------------+
    (row = reference; col = test)
    <BLANKLINE>

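The pre-trained ``PerceptronTagger`` can of course also be applied directly to
raw tokens with ``tag()``; a quick sketch (the sentence is invented here, so
its output is omitted):

    >>> tagger.tag("The quick brown fox jumps over the lazy dog".split())  # doctest: +SKIP
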
Brill Trainer with evaluation
=============================

    >>> # Perform the relevant imports.
    >>> from nltk.tbl.template import Template
    >>> from nltk.tag.brill import Pos, Word
    >>> from nltk.tag import untag, RegexpTagger, BrillTaggerTrainer, UnigramTagger

    >>> # Load some data
    >>> from nltk.corpus import treebank
    >>> training_data = treebank.tagged_sents()[:100]
    >>> baseline_data = treebank.tagged_sents()[100:200]
    >>> gold_data = treebank.tagged_sents()[200:300]
    >>> testing_data = [untag(s) for s in gold_data]

    >>> backoff = RegexpTagger([
    ...     (r'^-?[0-9]+(.[0-9]+)?$', 'CD'),  # cardinal numbers
    ...     (r'(The|the|A|a|An|an)$', 'AT'),  # articles
    ...     (r'.*able$', 'JJ'),               # adjectives
    ...     (r'.*ness$', 'NN'),               # nouns formed from adjectives
    ...     (r'.*ly$', 'RB'),                 # adverbs
    ...     (r'.*s$', 'NNS'),                 # plural nouns
    ...     (r'.*ing$', 'VBG'),               # gerunds
    ...     (r'.*ed$', 'VBD'),                # past tense verbs
    ...     (r'.*', 'NN')                     # nouns (default)
    ... ])

We've now created a simple ``RegexpTagger``, which tags according to the
regular expression rules it has been supplied. On its own, this tagger does
not achieve great accuracy.

    >>> backoff.accuracy(gold_data) #doctest: +ELLIPSIS
    0.245014...

Neither does a simple ``UnigramTagger``. This tagger is trained on some data,
and will then first try to match unigrams (i.e. tokens) of the sentence it has
to tag to the learned data.

    >>> unigram_tagger = UnigramTagger(baseline_data)
    >>> unigram_tagger.accuracy(gold_data) #doctest: +ELLIPSIS
    0.581196...

The lackluster accuracy here can be explained with the following example:

    >>> unigram_tagger.tag(["I", "would", "like", "this", "sentence", "to", "be", "tagged"])
    [('I', 'NNP'), ('would', 'MD'), ('like', None), ('this', 'DT'), ('sentence', None),
    ('to', 'TO'), ('be', 'VB'), ('tagged', None)]

As you can see, many tokens are tagged as ``None``, as these tokens are OOV
(out of vocabulary). The ``UnigramTagger`` has never seen them, and as a
result they are not in its database of known terms.

In practice, a ``UnigramTagger`` is exclusively used in conjunction with a
*backoff*. Our real baseline will therefore use such a backoff. We'll create a
``UnigramTagger`` like before, but now the ``RegexpTagger`` will be used as a
backoff for the situations where the ``UnigramTagger`` encounters an OOV
token.

    >>> baseline = UnigramTagger(baseline_data, backoff=backoff)
    >>> baseline.accuracy(gold_data) #doctest: +ELLIPSIS
    0.7537647...

That is already much better. We can investigate the performance further by
running ``evaluate_per_tag``. This method will output the *Precision*,
*Recall* and *F-measure* of each tag.

    >>> print(baseline.evaluate_per_tag(gold_data, sort_by_count=True))
       Tag | Prec.  | Recall | F-measure
    -------+--------+--------+-----------
       NNP | 0.9674 | 0.2738 | 0.4269
        NN | 0.4111 | 0.9136 | 0.5670
        IN | 0.9383 | 0.9580 | 0.9480
        DT | 0.9819 | 0.8859 | 0.9314
        JJ | 0.8167 | 0.2970 | 0.4356
       NNS | 0.7393 | 0.9630 | 0.8365
    -NONE- | 1.0000 | 0.8345 | 0.9098
         , | 1.0000 | 1.0000 | 1.0000
         . | 1.0000 | 1.0000 | 1.0000
       VBD | 0.6429 | 0.8804 | 0.7431
        CD | 1.0000 | 0.9872 | 0.9935
        CC | 1.0000 | 0.9355 | 0.9667
        VB | 0.7778 | 0.3684 | 0.5000
       VBN | 0.9375 | 0.3000 | 0.4545
        RB | 0.7778 | 0.7447 | 0.7609
        TO | 1.0000 | 1.0000 | 1.0000
       VBZ | 0.9643 | 0.6429 | 0.7714
       VBG | 0.6415 | 0.9444 | 0.7640
      PRP$ | 1.0000 | 1.0000 | 1.0000
       PRP | 1.0000 | 0.5556 | 0.7143
        MD | 1.0000 | 1.0000 | 1.0000
       VBP | 0.6471 | 0.5789 | 0.6111
       POS | 1.0000 | 1.0000 | 1.0000
         $ | 1.0000 | 0.8182 | 0.9000
        '' | 1.0000 | 1.0000 | 1.0000
         : | 1.0000 | 1.0000 | 1.0000
       WDT | 0.4000 | 0.2000 | 0.2667
        `` | 1.0000 | 1.0000 | 1.0000
       JJR | 1.0000 | 0.5000 | 0.6667
      NNPS | 0.0000 | 0.0000 | 0.0000
       RBR | 1.0000 | 1.0000 | 1.0000
     -LRB- | 0.0000 | 0.0000 | 0.0000
     -RRB- | 0.0000 | 0.0000 | 0.0000
        RP | 0.6667 | 0.6667 | 0.6667
        EX | 0.5000 | 0.5000 | 0.5000
       JJS | 0.0000 | 0.0000 | 0.0000
        WP | 1.0000 | 1.0000 | 1.0000
       PDT | 0.0000 | 0.0000 | 0.0000
        AT | 0.0000 | 0.0000 | 0.0000
    <BLANKLINE>

It's clear that although the precision of tagging `"NNP"` is high, the recall
is very low. In other words, we're missing a lot of cases where the true label
is `"NNP"`. We can see a similar effect with `"JJ"`.

We can also see an expected result: the precision of `"NN"` is low, while the
recall is high. If a term is OOV (i.e. ``UnigramTagger`` defers it to
``RegexpTagger``) and ``RegexpTagger`` doesn't have a good rule for it, then
it will be tagged as `"NN"`. So, we catch almost all tokens that are truly
labeled as `"NN"`, but we also tag many tokens as `"NN"` that shouldn't be.

This method gives us some insight into which parts of the tagger need more
attention, and why. However, it doesn't tell us what the terms with true label
`"NNP"` or `"JJ"` are actually tagged as.
To help with that, we can create a confusion matrix.

    >>> print(baseline.confusion(gold_data))
           | - |
           | - N - |
           | L O R N P |
           | R N R J J N N N P P P R R V V V V V W |
           | ' B E B A C C D E I J J J M N N P N D O R P R B R T V B B B B B D W ` |
           | $ ' , - - - . : T C D T X N J R S D N P S S T S P $ B R P O B D G N P Z T P ` |
    -------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+
         $ | <9> . . . . . . . . . . . . . . . . . 2 . . . . . . . . . . . . . . . . . . . . |
        '' | . <10> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
         , | . .<115> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
     -LRB- | . . . <.> . . . . . . . . . . . . . . 3 . . . . . . . . . . . . . . . . . . . . |
    -NONE- | . . . .<121> . . . . . . . . . . . . . 24 . . . . . . . . . . . . . . . . . . . . |
     -RRB- | . . . . . <.> . . . . . . . . . . . . 3 . . . . . . . . . . . . . . . . . . . . |
         . | . . . . . .<100> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
         : | . . . . . . . <10> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
        AT | . . . . . . . . <.> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
        CC | . . . . . . . . . <58> . . . . . . . . 4 . . . . . . . . . . . . . . . . . . . . |
        CD | . . . . . . . . . . <77> . . . . . . . 1 . . . . . . . . . . . . . . . . . . . . |
        DT | . . . . . . . . 1 . .<163> . 4 . . . . 13 . . . . . . . . . . . . . . . . . 3 . . |
        EX | . . . . . . . . . . . . <1> . . . . . 1 . . . . . . . . . . . . . . . . . . . . |
        IN | . . . . . . . . . . . . .<228> . . . . 8 . . . . . . . . . . . . . 2 . . . . . . |
        JJ | . . . . . . . . . . . . . . <49> . . . 86 2 . 4 . . . . 6 . . . . 12 3 . 3 . . . . |
       JJR | . . . . . . . . . . . . . . . <3> . . 3 . . . . . . . . . . . . . . . . . . . . |
       JJS | . . . . . . . . . . . . . . . . <.> . 2 . . . . . . . . . . . . . . . . . . . . |
        MD | . . . . . . . . . . . . . . . . . <19> . . . . . . . . . . . . . . . . . . . . . |
        NN | . . . . . . . . . . . . . . 9 . . .<296> . . 5 . . . . . . . . 5 . 9 . . . . . . |
       NNP | . . . . . . . . . . . 2 . . . . . . 199 <89> . 26 . . . . 2 . . . . 2 5 . . . . . . |
      NNPS | . . . . . . . . . . . . . . . . . . . 1 <.> 3 . . . . . . . . . . . . . . . . . |
       NNS | . . . . . . . . . . . . . . . . . . 5 . .<156> . . . . . . . . . . . . . 1 . . . |
       PDT | . . . . . . . . . . . 1 . . . . . . . . . . <.> . . . . . . . . . . . . . . . . |
       POS | . . . . . . . . . . . . . . . . . . . . . . . <14> . . . . . . . . . . . . . . . |
       PRP | . . . . . . . . . . . . . . . . . . 10 . . 2 . . <15> . . . . . . . . . . . . . . |
      PRP$ | . . . . . . . . . . . . . . . . . . . . . . . . . <28> . . . . . . . . . . . . . |
        RB | . . . . . . . . . . . . 1 4 . . . . 6 . . . . . . . <35> . 1 . . . . . . . . . . |
       RBR | . . . . . . . . . . . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . |
        RP | . . . . . . . . . . . . . . . . . . . . . . . . . . 1 . <2> . . . . . . . . . . |
        TO | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <47> . . . . . . . . . |
        VB | . . . . . . . . . . . . . . 2 . . . 30 . . . . . . . 1 . . . <21> . . . 3 . . . . |
       VBD | . . . . . . . . . . . . . . . . . . 10 . . . . . . . . . . . . <81> . 1 . . . . . |
       VBG | . . . . . . . . . . . . . . . . . . 2 . . . . . . . . . . . . . <34> . . . . . . |
       VBN | . . . . . . . . . . . . . . . . . . 4 . . . . . . . . . . . . 31 . <15> . . . . . |
       VBP | . . . . . . . . . . . . . . . . . . 7 . . . . . . . . . . . 1 . . . <11> . . . . |
       VBZ | . . . . . . . . . . . . . . . . . . . . . 15 . . . . . . . . . . . . . <27> . . . |
       WDT | . . . . . . . . . . . . . 7 . . . . 1 . . . . . . . . . . . . . . . . . <2> . . |
        WP | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <2> . |
        `` | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <10>|
    -------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+
    (row = reference; col = test)
    <BLANKLINE>

Once again we can see that `"NN"` is the default if the tagger isn't sure.
Beyond that, we can see why the recall for `"NNP"` is so low: these tokens are
often tagged as `"NN"`. This effect can also be seen for `"JJ"`, where the
majority of tokens that ought to be tagged as `"JJ"` are actually tagged as
`"NN"` by our tagger.

This tagger will only serve as a baseline for the ``BrillTaggerTrainer``,
which uses templates to attempt to improve the performance of the tagger.

    >>> # Set up templates
    >>> Template._cleartemplates()  # clear any templates created in earlier tests
    >>> templates = [Template(Pos([-1])), Template(Pos([-1]), Word([0]))]

    >>> # Construct a BrillTaggerTrainer
    >>> tt = BrillTaggerTrainer(baseline, templates, trace=3)
    >>> tagger1 = tt.train(training_data, max_rules=10)
    TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: None)
    Finding initial useful rules...
        Found 618 useful rules.
    <BLANKLINE>
               B      |
       S   F   r   O  |        Score = Fixed - Broken
       c   i   o   t  |  R     Fixed = num tags changed incorrect -> correct
       o   x   k   h  |  u     Broken = num tags changed correct -> incorrect
       r   e   e   e  |  l     Other = num tags changed incorrect -> incorrect
       e   d   n   r  |  e
    ------------------+-------------------------------------------------------
      13  14   1   4  | NN->VB if Pos:TO@[-1]
       8   8   0   0  | NN->VB if Pos:MD@[-1]
       7  10   3  22  | NN->IN if Pos:NNS@[-1]
       5   5   0   0  | NN->VBP if Pos:PRP@[-1]
       5   5   0   0  | VBD->VBN if Pos:VBZ@[-1]
       5   5   0   0  | NNS->NN if Pos:IN@[-1] & Word:asbestos@[0]
       4   4   0   0  | NN->-NONE- if Pos:WP@[-1]
       4   4   0   3  | NN->NNP if Pos:-NONE-@[-1]
       4   6   2   2  | NN->NNP if Pos:NNP@[-1]
       4   4   0   0  | NNS->VBZ if Pos:PRP@[-1]

    >>> tagger1.rules()[1:3]
    (Rule('000', 'NN', 'VB', [(Pos([-1]),'MD')]), Rule('000', 'NN', 'IN', [(Pos([-1]),'NNS')]))

    >>> tagger1.print_template_statistics(printunused=False)
    TEMPLATE STATISTICS (TRAIN)  2 templates, 10 rules)
    TRAIN (   2417 tokens) initial  555 0.7704 final:  496 0.7948
    #ID | Score (train) |  #Rules     | Template
    --------------------------------------------
    000 |    54   0.915 |   9   0.900 | Template(Pos([-1]))
    001 |     5   0.085 |   1   0.100 | Template(Pos([-1]),Word([0]))
    <BLANKLINE>
    <BLANKLINE>

    >>> tagger1.accuracy(gold_data) # doctest: +ELLIPSIS
    0.769230...
|
335 |
+
>>> print(tagger1.evaluate_per_tag(gold_data, sort_by_count=True))
|
336 |
+
Tag | Prec. | Recall | F-measure
|
337 |
+
-------+--------+--------+-----------
|
338 |
+
NNP | 0.8298 | 0.3600 | 0.5021
|
339 |
+
NN | 0.4435 | 0.8364 | 0.5797
|
340 |
+
IN | 0.8476 | 0.9580 | 0.8994
|
341 |
+
DT | 0.9819 | 0.8859 | 0.9314
|
342 |
+
JJ | 0.8167 | 0.2970 | 0.4356
|
343 |
+
NNS | 0.7464 | 0.9630 | 0.8410
|
344 |
+
-NONE- | 1.0000 | 0.8414 | 0.9139
|
345 |
+
, | 1.0000 | 1.0000 | 1.0000
|
346 |
+
. | 1.0000 | 1.0000 | 1.0000
|
347 |
+
VBD | 0.6723 | 0.8696 | 0.7583
|
348 |
+
CD | 1.0000 | 0.9872 | 0.9935
|
349 |
+
CC | 1.0000 | 0.9355 | 0.9667
|
350 |
+
VB | 0.8103 | 0.8246 | 0.8174
|
351 |
+
VBN | 0.9130 | 0.4200 | 0.5753
|
352 |
+
RB | 0.7778 | 0.7447 | 0.7609
|
353 |
+
TO | 1.0000 | 1.0000 | 1.0000
|
354 |
+
VBZ | 0.9667 | 0.6905 | 0.8056
|
355 |
+
VBG | 0.6415 | 0.9444 | 0.7640
|
356 |
+
PRP$ | 1.0000 | 1.0000 | 1.0000
|
357 |
+
PRP | 1.0000 | 0.5556 | 0.7143
|
358 |
+
MD | 1.0000 | 1.0000 | 1.0000
|
359 |
+
VBP | 0.6316 | 0.6316 | 0.6316
|
360 |
+
POS | 1.0000 | 1.0000 | 1.0000
|
361 |
+
$ | 1.0000 | 0.8182 | 0.9000
|
362 |
+
'' | 1.0000 | 1.0000 | 1.0000
|
363 |
+
: | 1.0000 | 1.0000 | 1.0000
|
364 |
+
WDT | 0.4000 | 0.2000 | 0.2667
|
365 |
+
`` | 1.0000 | 1.0000 | 1.0000
|
366 |
+
JJR | 1.0000 | 0.5000 | 0.6667
|
367 |
+
NNPS | 0.0000 | 0.0000 | 0.0000
|
368 |
+
RBR | 1.0000 | 1.0000 | 1.0000
|
369 |
+
-LRB- | 0.0000 | 0.0000 | 0.0000
|
370 |
+
-RRB- | 0.0000 | 0.0000 | 0.0000
|
371 |
+
RP | 0.6667 | 0.6667 | 0.6667
|
372 |
+
EX | 0.5000 | 0.5000 | 0.5000
|
373 |
+
JJS | 0.0000 | 0.0000 | 0.0000
|
374 |
+
WP | 1.0000 | 1.0000 | 1.0000
|
375 |
+
PDT | 0.0000 | 0.0000 | 0.0000
|
376 |
+
AT | 0.0000 | 0.0000 | 0.0000
|
377 |
+
<BLANKLINE>
|
378 |
+
|
379 |
+
>>> print(tagger1.confusion(gold_data))
|
380 |
+
| - |
|
381 |
+
| - N - |
|
382 |
+
| L O R N P |
|
383 |
+
| R N R J J N N N P P P R R V V V V V W |
|
384 |
+
| ' B E B A C C D E I J J J M N N P N D O R P R B R T V B B B B B D W ` |
|
385 |
+
| $ ' , - - - . : T C D T X N J R S D N P S S T S P $ B R P O B D G N P Z T P ` |
|
386 |
+
-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
387 |
+
$ | <9> . . . . . . . . . . . . . . . . . 1 . . . . . . . . . . . 1 . . . . . . . . |
|
388 |
+
'' | . <10> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
|
389 |
+
, | . .<115> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
|
390 |
+
-LRB- | . . . <.> . . . . . . . . . 1 . . . . 2 . . . . . . . . . . . . . . . . . . . . |
|
391 |
+
-NONE- | . . . .<122> . . . . . . . . 1 . . . . 22 . . . . . . . . . . . . . . . . . . . . |
|
392 |
+
-RRB- | . . . . . <.> . . . . . . . . . . . . 2 1 . . . . . . . . . . . . . . . . . . . |
|
393 |
+
. | . . . . . .<100> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
|
394 |
+
: | . . . . . . . <10> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
|
395 |
+
AT | . . . . . . . . <.> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
|
396 |
+
CC | . . . . . . . . . <58> . . . . . . . . 2 1 . . . . . . . . . . . . . . 1 . . . . |
|
397 |
+
CD | . . . . . . . . . . <77> . . . . . . . 1 . . . . . . . . . . . . . . . . . . . . |
|
398 |
+
DT | . . . . . . . . 1 . .<163> . 5 . . . . 12 . . . . . . . . . . . . . . . . . 3 . . |
|
399 |
+
EX | . . . . . . . . . . . . <1> . . . . . 1 . . . . . . . . . . . . . . . . . . . . |
|
400 |
+
IN | . . . . . . . . . . . . .<228> . . . . 8 . . . . . . . . . . . . . 2 . . . . . . |
|
401 |
+
JJ | . . . . . . . . . . . . . 4 <49> . . . 79 4 . 4 . . . . 6 . . . 1 12 3 . 3 . . . . |
|
402 |
+
JJR | . . . . . . . . . . . . . 2 . <3> . . 1 . . . . . . . . . . . . . . . . . . . . |
|
403 |
+
JJS | . . . . . . . . . . . . . . . . <.> . 2 . . . . . . . . . . . . . . . . . . . . |
|
404 |
+
MD | . . . . . . . . . . . . . . . . . <19> . . . . . . . . . . . . . . . . . . . . . |
|
405 |
+
NN | . . . . . . . . . . . . . 7 9 . . .<271> 16 . 5 . . . . . . . . 7 . 9 . . . . . . |
|
406 |
+
NNP | . . . . . . . . . . . 2 . 7 . . . . 163<117> . 26 . . . . 2 . . . 1 2 5 . . . . . . |
|
407 |
+
NNPS | . . . . . . . . . . . . . . . . . . . 1 <.> 3 . . . . . . . . . . . . . . . . . |
|
408 |
+
NNS | . . . . . . . . . . . . . . . . . . 5 . .<156> . . . . . . . . . . . . . 1 . . . |
|
409 |
+
PDT | . . . . . . . . . . . 1 . . . . . . . . . . <.> . . . . . . . . . . . . . . . . |
|
410 |
+
POS | . . . . . . . . . . . . . . . . . . . . . . . <14> . . . . . . . . . . . . . . . |
|
411 |
+
PRP | . . . . . . . . . . . . . . . . . . 10 . . 2 . . <15> . . . . . . . . . . . . . . |
|
412 |
+
PRP$ | . . . . . . . . . . . . . . . . . . . . . . . . . <28> . . . . . . . . . . . . . |
|
413 |
+
RB | . . . . . . . . . . . . 1 4 . . . . 6 . . . . . . . <35> . 1 . . . . . . . . . . |
|
414 |
+
RBR | . . . . . . . . . . . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . |
|
415 |
+
RP | . . . . . . . . . . . . . . . . . . . . . . . . . . 1 . <2> . . . . . . . . . . |
|
416 |
+
TO | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <47> . . . . . . . . . |
|
417 |
+
VB | . . . . . . . . . . . . . . 2 . . . 4 . . . . . . . 1 . . . <47> . . . 3 . . . . |
|
418 |
+
VBD | . . . . . . . . . . . . . 1 . . . . 8 1 . . . . . . . . . . . <80> . 2 . . . . . |
|
419 |
+
VBG | . . . . . . . . . . . . . . . . . . 2 . . . . . . . . . . . . . <34> . . . . . . |
|
420 |
+
VBN | . . . . . . . . . . . . . . . . . . 4 . . . . . . . . . . . . 25 . <21> . . . . . |
|
421 |
+
VBP | . . . . . . . . . . . . . 2 . . . . 4 . . . . . . . . . . . 1 . . . <12> . . . . |
|
422 |
+
VBZ | . . . . . . . . . . . . . . . . . . . . . 13 . . . . . . . . . . . . . <29> . . . |
|
423 |
+
WDT | . . . . . . . . . . . . . 7 . . . . 1 . . . . . . . . . . . . . . . . . <2> . . |
|
424 |
+
WP | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <2> . |
|
425 |
+
`` | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <10>|
|
426 |
+
-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
427 |
+
(row = reference; col = test)
|
428 |
+
<BLANKLINE>
|
429 |
+
|
430 |
+
>>> tagged, test_stats = tagger1.batch_tag_incremental(testing_data, gold_data)
|
431 |
+
>>> tagged[33][12:]
|
432 |
+
[('foreign', 'NN'), ('debt', 'NN'), ('of', 'IN'), ('$', '$'), ('64', 'CD'),
|
433 |
+
('billion', 'CD'), ('*U*', '-NONE-'), ('--', ':'), ('the', 'DT'), ('third-highest', 'NN'),
|
434 |
+
('in', 'IN'), ('the', 'DT'), ('developing', 'VBG'), ('world', 'NN'), ('.', '.')]
|
435 |
+
|
436 |
+
Regression Tests
|
437 |
+
~~~~~~~~~~~~~~~~
|
438 |
+
|
439 |
+
Sequential Taggers
|
440 |
+
------------------
|
441 |
+
|
442 |
+
Add tests for:
|
443 |
+
- make sure backoff is being done correctly.
|
444 |
+
- make sure ngram taggers don't use previous sentences for context.
|
445 |
+
- make sure ngram taggers see 'beginning of the sentence' as a
|
446 |
+
unique context
|
447 |
+
- make sure regexp tagger's regexps are tried in order
|
448 |
+
- train on some simple examples, & make sure that the size & the
|
449 |
+
generated models are correct.
|
450 |
+
- make sure cutoff works as intended
|
451 |
+
- make sure that ngram models only exclude contexts covered by the
|
452 |
+
backoff tagger if the backoff tagger gets that context correct at
|
453 |
+
*all* locations.
|
454 |
+
|
455 |
+
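
A minimal sketch of the first item above, using only the public tagger API (the
toy training data is made up here for illustration, not part of the suite): a
``UnigramTagger`` should fall back on its backoff tagger exactly for the words
it has never seen.

>>> from nltk.tag import DefaultTagger, UnigramTagger
>>> toy_train = [[("the", "DT"), ("cat", "NN")]]
>>> toy_tagger = UnigramTagger(toy_train, backoff=DefaultTagger("NN"))
>>> toy_tagger.tag(["the", "dog"])
[('the', 'DT'), ('dog', 'NN')]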

Regression Testing for issue #1025
==================================

We want to ensure that a RegexpTagger can be created with more than 100 patterns
and does not fail with: "AssertionError: sorry, but this version only supports 100 named groups"

>>> from nltk.tag import RegexpTagger
>>> patterns = [(str(i), 'NNP',) for i in range(200)]
>>> tagger = RegexpTagger(patterns)
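
As a quick sanity check (the inputs below are made up for illustration), the
resulting tagger is usable: the patterns are tried in order, and a token
matching none of them is left untagged.

>>> tagger.tag(["0", "abc"])
[('0', 'NNP'), ('abc', None)]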

Regression Testing for issue #2483
==================================

Ensure that tagging with pos_tag (PerceptronTagger) does not throw an IndexError
when attempting to tag an empty string. What it must return instead is not
strictly defined.

>>> from nltk.tag import pos_tag
>>> pos_tag(['', 'is', 'a', 'beautiful', 'day'])
[...]
lib/python3.10/site-packages/nltk/test/treetransforms.doctest
ADDED
@@ -0,0 +1,154 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

-------------------------------------------
Unit tests for the TreeTransformation class
-------------------------------------------

>>> from copy import deepcopy
>>> from nltk.tree import Tree, collapse_unary, chomsky_normal_form, un_chomsky_normal_form

>>> tree_string = "(TOP (S (S (VP (VBN Turned) (ADVP (RB loose)) (PP (IN in) (NP (NP (NNP Shane) (NNP Longman) (POS 's)) (NN trading) (NN room))))) (, ,) (NP (DT the) (NN yuppie) (NNS dealers)) (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) (. .)))"

>>> tree = Tree.fromstring(tree_string)
>>> print(tree)
(TOP
  (S
    (S
      (VP
        (VBN Turned)
        (ADVP (RB loose))
        (PP
          (IN in)
          (NP
            (NP (NNP Shane) (NNP Longman) (POS 's))
            (NN trading)
            (NN room)))))
    (, ,)
    (NP (DT the) (NN yuppie) (NNS dealers))
    (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
    (. .)))

Make a copy of the original tree and collapse the subtrees with only one child

>>> collapsedTree = deepcopy(tree)
>>> collapse_unary(collapsedTree)
>>> print(collapsedTree)
(TOP
  (S
    (S+VP
      (VBN Turned)
      (ADVP (RB loose))
      (PP
        (IN in)
        (NP
          (NP (NNP Shane) (NNP Longman) (POS 's))
          (NN trading)
          (NN room))))
    (, ,)
    (NP (DT the) (NN yuppie) (NNS dealers))
    (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
    (. .)))

>>> collapsedTree2 = deepcopy(tree)
>>> collapse_unary(collapsedTree2, collapsePOS=True, collapseRoot=True)
>>> print(collapsedTree2)
(TOP+S
  (S+VP
    (VBN Turned)
    (ADVP+RB loose)
    (PP
      (IN in)
      (NP
        (NP (NNP Shane) (NNP Longman) (POS 's))
        (NN trading)
        (NN room))))
  (, ,)
  (NP (DT the) (NN yuppie) (NNS dealers))
  (VP (AUX do) (NP (NP+RB little) (ADJP+RB right)))
  (. .))

Convert the tree to Chomsky Normal Form i.e. each subtree has either two
subtree children or a single leaf value. This conversion can be performed
using either left- or right-factoring.

>>> cnfTree = deepcopy(collapsedTree)
>>> chomsky_normal_form(cnfTree, factor='left')
>>> print(cnfTree)
(TOP
  (S
    (S|<S+VP-,-NP-VP>
      (S|<S+VP-,-NP>
        (S|<S+VP-,>
          (S+VP
            (S+VP|<VBN-ADVP> (VBN Turned) (ADVP (RB loose)))
            (PP
              (IN in)
              (NP
                (NP|<NP-NN>
                  (NP
                    (NP|<NNP-NNP> (NNP Shane) (NNP Longman))
                    (POS 's))
                  (NN trading))
                (NN room))))
          (, ,))
        (NP (NP|<DT-NN> (DT the) (NN yuppie)) (NNS dealers)))
      (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))))
    (. .)))

>>> cnfTree = deepcopy(collapsedTree)
>>> chomsky_normal_form(cnfTree, factor='right')
>>> print(cnfTree)
(TOP
  (S
    (S+VP
      (VBN Turned)
      (S+VP|<ADVP-PP>
        (ADVP (RB loose))
        (PP
          (IN in)
          (NP
            (NP (NNP Shane) (NP|<NNP-POS> (NNP Longman) (POS 's)))
            (NP|<NN-NN> (NN trading) (NN room))))))
    (S|<,-NP-VP-.>
      (, ,)
      (S|<NP-VP-.>
        (NP (DT the) (NP|<NN-NNS> (NN yuppie) (NNS dealers)))
        (S|<VP-.>
          (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
          (. .))))))

Employ some Markov smoothing to make the artificial node labels a bit more
readable. See the treetransforms.py documentation for more details.

>>> markovTree = deepcopy(collapsedTree)
>>> chomsky_normal_form(markovTree, horzMarkov=2, vertMarkov=1)
>>> print(markovTree)
(TOP
  (S^<TOP>
    (S+VP^<S>
      (VBN Turned)
      (S+VP|<ADVP-PP>^<S>
        (ADVP^<S+VP> (RB loose))
        (PP^<S+VP>
          (IN in)
          (NP^<PP>
            (NP^<NP>
              (NNP Shane)
              (NP|<NNP-POS>^<NP> (NNP Longman) (POS 's)))
            (NP|<NN-NN>^<PP> (NN trading) (NN room))))))
    (S|<,-NP>^<TOP>
      (, ,)
      (S|<NP-VP>^<TOP>
        (NP^<S> (DT the) (NP|<NN-NNS>^<S> (NN yuppie) (NNS dealers)))
        (S|<VP-.>^<TOP>
          (VP^<S>
            (AUX do)
            (NP^<VP> (NP^<NP> (RB little)) (ADJP^<NP> (RB right))))
          (. .))))))

Convert the transformed tree back to its original form

>>> un_chomsky_normal_form(markovTree)
>>> tree == markovTree
True
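
The same transformations are also available as ``Tree`` methods; a minimal
equivalent sketch of the round trip, reusing the objects defined above (the
expected ``True`` is inferred from the function-based round trip, not taken
from the original file):

>>> methodTree = deepcopy(collapsedTree)
>>> methodTree.chomsky_normal_form(horzMarkov=2, vertMarkov=1)
>>> methodTree.un_chomsky_normal_form()
>>> tree == methodTree
True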
lib/python3.10/site-packages/nltk/test/unit/__init__.py
ADDED
File without changes
lib/python3.10/site-packages/nltk/test/unit/lm/__init__.py
ADDED
File without changes
lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py
ADDED
@@ -0,0 +1,116 @@
# Natural Language Toolkit: Language Model Unit Tests
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Ilia Kurenkov <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

import unittest

import pytest

from nltk import FreqDist
from nltk.lm import NgramCounter
from nltk.util import everygrams


class TestNgramCounter:
    """Tests for NgramCounter that only involve lookup, no modification."""

    @classmethod
    def setup_class(self):
        text = [list("abcd"), list("egdbe")]
        self.trigram_counter = NgramCounter(
            everygrams(sent, max_len=3) for sent in text
        )
        self.bigram_counter = NgramCounter(everygrams(sent, max_len=2) for sent in text)
        self.case = unittest.TestCase()

    def test_N(self):
        assert self.bigram_counter.N() == 16
        assert self.trigram_counter.N() == 21

    def test_counter_len_changes_with_lookup(self):
        assert len(self.bigram_counter) == 2
        self.bigram_counter[50]
        assert len(self.bigram_counter) == 3

    def test_ngram_order_access_unigrams(self):
        assert self.bigram_counter[1] == self.bigram_counter.unigrams

    def test_ngram_conditional_freqdist(self):
        case = unittest.TestCase()
        expected_trigram_contexts = [
            ("a", "b"),
            ("b", "c"),
            ("e", "g"),
            ("g", "d"),
            ("d", "b"),
        ]
        expected_bigram_contexts = [("a",), ("b",), ("d",), ("e",), ("c",), ("g",)]

        bigrams = self.trigram_counter[2]
        trigrams = self.trigram_counter[3]

        self.case.assertCountEqual(expected_bigram_contexts, bigrams.conditions())
        self.case.assertCountEqual(expected_trigram_contexts, trigrams.conditions())

    def test_bigram_counts_seen_ngrams(self):
        assert self.bigram_counter[["a"]]["b"] == 1
        assert self.bigram_counter[["b"]]["c"] == 1

    def test_bigram_counts_unseen_ngrams(self):
        assert self.bigram_counter[["b"]]["z"] == 0

    def test_unigram_counts_seen_words(self):
        assert self.bigram_counter["b"] == 2

    def test_unigram_counts_completely_unseen_words(self):
        assert self.bigram_counter["z"] == 0


class TestNgramCounterTraining:
    @classmethod
    def setup_class(self):
        self.counter = NgramCounter()
        self.case = unittest.TestCase()

    @pytest.mark.parametrize("case", ["", [], None])
    def test_empty_inputs(self, case):
        test = NgramCounter(case)
        assert 2 not in test
        assert test[1] == FreqDist()

    def test_train_on_unigrams(self):
        words = list("abcd")
        counter = NgramCounter([[(w,) for w in words]])

        assert not counter[3]
        assert not counter[2]
        self.case.assertCountEqual(words, counter[1].keys())

    def test_train_on_illegal_sentences(self):
        str_sent = ["Check", "this", "out", "!"]
        list_sent = [["Check", "this"], ["this", "out"], ["out", "!"]]

        with pytest.raises(TypeError):
            NgramCounter([str_sent])

        with pytest.raises(TypeError):
            NgramCounter([list_sent])

    def test_train_on_bigrams(self):
        bigram_sent = [("a", "b"), ("c", "d")]
        counter = NgramCounter([bigram_sent])
        assert not bool(counter[3])

    def test_train_on_mix(self):
        mixed_sent = [("a", "b"), ("c", "d"), ("e", "f", "g"), ("h",)]
        counter = NgramCounter([mixed_sent])
        unigrams = ["h"]
        bigram_contexts = [("a",), ("c",)]
        trigram_contexts = [("e", "f")]

        self.case.assertCountEqual(unigrams, counter[1].keys())
        self.case.assertCountEqual(bigram_contexts, counter[2].keys())
        self.case.assertCountEqual(trigram_contexts, counter[3].keys())
lib/python3.10/site-packages/nltk/test/unit/lm/test_preprocessing.py
ADDED
@@ -0,0 +1,30 @@
# Natural Language Toolkit: Language Model Unit Tests
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Ilia Kurenkov <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
import unittest

from nltk.lm.preprocessing import padded_everygram_pipeline


class TestPreprocessing(unittest.TestCase):
    def test_padded_everygram_pipeline(self):
        expected_train = [
            [
                ("<s>",),
                ("<s>", "a"),
                ("a",),
                ("a", "b"),
                ("b",),
                ("b", "c"),
                ("c",),
                ("c", "</s>"),
                ("</s>",),
            ]
        ]
        expected_vocab = ["<s>", "a", "b", "c", "</s>"]
        train_data, vocab_data = padded_everygram_pipeline(2, [["a", "b", "c"]])
        self.assertEqual([list(sent) for sent in train_data], expected_train)
        self.assertEqual(list(vocab_data), expected_vocab)
lib/python3.10/site-packages/nltk/test/unit/lm/test_vocabulary.py
ADDED
@@ -0,0 +1,156 @@
# Natural Language Toolkit: Language Model Unit Tests
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Ilia Kurenkov <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

import unittest
from collections import Counter
from timeit import timeit

from nltk.lm import Vocabulary


class NgramModelVocabularyTests(unittest.TestCase):
    """tests Vocabulary Class"""

    @classmethod
    def setUpClass(cls):
        cls.vocab = Vocabulary(
            ["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"],
            unk_cutoff=2,
        )

    def test_truthiness(self):
        self.assertTrue(self.vocab)

    def test_cutoff_value_set_correctly(self):
        self.assertEqual(self.vocab.cutoff, 2)

    def test_unable_to_change_cutoff(self):
        with self.assertRaises(AttributeError):
            self.vocab.cutoff = 3

    def test_cutoff_setter_checks_value(self):
        with self.assertRaises(ValueError) as exc_info:
            Vocabulary("abc", unk_cutoff=0)
        expected_error_msg = "Cutoff value cannot be less than 1. Got: 0"
        self.assertEqual(expected_error_msg, str(exc_info.exception))

    def test_counts_set_correctly(self):
        self.assertEqual(self.vocab.counts["a"], 2)
        self.assertEqual(self.vocab.counts["b"], 2)
        self.assertEqual(self.vocab.counts["c"], 1)

    def test_membership_check_respects_cutoff(self):
        # a was seen 2 times, so it should be considered part of the vocabulary
        self.assertTrue("a" in self.vocab)
        # "c" was seen once, it shouldn't be considered part of the vocab
        self.assertFalse("c" in self.vocab)
        # "z" was never seen at all, also shouldn't be considered in the vocab
        self.assertFalse("z" in self.vocab)

    def test_vocab_len_respects_cutoff(self):
        # Vocab size is the number of unique tokens that occur at least as often
        # as the cutoff value, plus 1 to account for unknown words.
        self.assertEqual(5, len(self.vocab))

    def test_vocab_iter_respects_cutoff(self):
        vocab_counts = ["a", "b", "c", "d", "e", "f", "g", "w", "z"]
        vocab_items = ["a", "b", "d", "e", "<UNK>"]

        self.assertCountEqual(vocab_counts, list(self.vocab.counts.keys()))
        self.assertCountEqual(vocab_items, list(self.vocab))

    def test_update_empty_vocab(self):
        empty = Vocabulary(unk_cutoff=2)
        self.assertEqual(len(empty), 0)
        self.assertFalse(empty)
        self.assertIn(empty.unk_label, empty)

        empty.update(list("abcde"))
        self.assertIn(empty.unk_label, empty)

    def test_lookup(self):
        self.assertEqual(self.vocab.lookup("a"), "a")
        self.assertEqual(self.vocab.lookup("c"), "<UNK>")

    def test_lookup_iterables(self):
        self.assertEqual(self.vocab.lookup(["a", "b"]), ("a", "b"))
        self.assertEqual(self.vocab.lookup(("a", "b")), ("a", "b"))
        self.assertEqual(self.vocab.lookup(("a", "c")), ("a", "<UNK>"))
        self.assertEqual(
            self.vocab.lookup(map(str, range(3))), ("<UNK>", "<UNK>", "<UNK>")
        )

    def test_lookup_empty_iterables(self):
        self.assertEqual(self.vocab.lookup(()), ())
        self.assertEqual(self.vocab.lookup([]), ())
        self.assertEqual(self.vocab.lookup(iter([])), ())
        self.assertEqual(self.vocab.lookup(n for n in range(0, 0)), ())

    def test_lookup_recursive(self):
        self.assertEqual(
            self.vocab.lookup([["a", "b"], ["a", "c"]]), (("a", "b"), ("a", "<UNK>"))
        )
        self.assertEqual(self.vocab.lookup([["a", "b"], "c"]), (("a", "b"), "<UNK>"))
        self.assertEqual(self.vocab.lookup([[[[["a", "b"]]]]]), ((((("a", "b"),),),),))

    def test_lookup_None(self):
        with self.assertRaises(TypeError):
            self.vocab.lookup(None)
        with self.assertRaises(TypeError):
            list(self.vocab.lookup([None, None]))

    def test_lookup_int(self):
        with self.assertRaises(TypeError):
            self.vocab.lookup(1)
        with self.assertRaises(TypeError):
            list(self.vocab.lookup([1, 2]))

    def test_lookup_empty_str(self):
        self.assertEqual(self.vocab.lookup(""), "<UNK>")

    def test_eqality(self):
        v1 = Vocabulary(["a", "b", "c"], unk_cutoff=1)
        v2 = Vocabulary(["a", "b", "c"], unk_cutoff=1)
        v3 = Vocabulary(["a", "b", "c"], unk_cutoff=1, unk_label="blah")
        v4 = Vocabulary(["a", "b"], unk_cutoff=1)

        self.assertEqual(v1, v2)
        self.assertNotEqual(v1, v3)
        self.assertNotEqual(v1, v4)

    def test_str(self):
        self.assertEqual(
            str(self.vocab), "<Vocabulary with cutoff=2 unk_label='<UNK>' and 5 items>"
        )

    def test_creation_with_counter(self):
        self.assertEqual(
            self.vocab,
            Vocabulary(
                Counter(
                    ["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"]
                ),
                unk_cutoff=2,
            ),
        )

    @unittest.skip(
        reason="Test is known to be flaky as it compares (runtime) performance."
    )
    def test_len_is_constant(self):
        # Given an obviously small and an obviously large vocabulary.
        small_vocab = Vocabulary("abcde")
        from nltk.corpus.europarl_raw import english

        large_vocab = Vocabulary(english.words())

        # If we time calling `len` on them.
        small_vocab_len_time = timeit("len(small_vocab)", globals=locals())
        large_vocab_len_time = timeit("len(large_vocab)", globals=locals())

        # The timing should be the same order of magnitude.
        self.assertAlmostEqual(small_vocab_len_time, large_vocab_len_time, places=1)
lib/python3.10/site-packages/nltk/test/unit/test_aline.py
ADDED
@@ -0,0 +1,48 @@
"""
Test Aline algorithm for aligning phonetic sequences
"""
from nltk.metrics import aline


def test_aline():
    result = aline.align("θin", "tenwis")
    expected = [[("θ", "t"), ("i", "e"), ("n", "n")]]

    assert result == expected

    result = aline.align("jo", "ʒə")
    expected = [[("j", "ʒ"), ("o", "ə")]]

    assert result == expected

    result = aline.align("pematesiweni", "pematesewen")
    expected = [
        [
            ("p", "p"),
            ("e", "e"),
            ("m", "m"),
            ("a", "a"),
            ("t", "t"),
            ("e", "e"),
            ("s", "s"),
            ("i", "e"),
            ("w", "w"),
            ("e", "e"),
            ("n", "n"),
        ]
    ]

    assert result == expected

    result = aline.align("tuwθ", "dentis")
    expected = [[("t", "t"), ("u", "i"), ("w", "-"), ("θ", "s")]]

    assert result == expected


def test_aline_delta():
    """
    Test aline for computing the difference between two segments
    """
    assert aline.delta("p", "q") == 20.0
    assert aline.delta("a", "A") == 0.0
lib/python3.10/site-packages/nltk/test/unit/test_bllip.py
ADDED
@@ -0,0 +1,42 @@
import pytest

from nltk.data import find
from nltk.parse.bllip import BllipParser
from nltk.tree import Tree


@pytest.fixture(scope="module")
def parser():
    model_dir = find("models/bllip_wsj_no_aux").path
    return BllipParser.from_unified_model_dir(model_dir)


def setup_module():
    pytest.importorskip("bllipparser")


class TestBllipParser:
    def test_parser_loads_a_valid_tree(self, parser):
        parsed = parser.parse("I saw the man with the telescope")
        tree = next(parsed)

        assert isinstance(tree, Tree)
        assert (
            tree.pformat()
            == """
(S1
  (S
    (NP (PRP I))
    (VP
      (VBD saw)
      (NP (DT the) (NN man))
      (PP (IN with) (NP (DT the) (NN telescope))))))
""".strip()
        )

    def test_tagged_parse_finds_matching_element(self, parser):
        parsed = parser.parse("I saw the man with the telescope")
        tagged_tree = next(parser.tagged_parse([("telescope", "NN")]))

        assert isinstance(tagged_tree, Tree)
        assert tagged_tree.pformat() == "(S1 (NP (NN telescope)))"
lib/python3.10/site-packages/nltk/test/unit/test_brill.py
ADDED
@@ -0,0 +1,34 @@
"""
Tests for Brill tagger.
"""

import unittest

from nltk.corpus import treebank
from nltk.tag import UnigramTagger, brill, brill_trainer
from nltk.tbl import demo


class TestBrill(unittest.TestCase):
    def test_pos_template(self):
        train_sents = treebank.tagged_sents()[:1000]
        tagger = UnigramTagger(train_sents)
        trainer = brill_trainer.BrillTaggerTrainer(
            tagger, [brill.Template(brill.Pos([-1]))]
        )
        brill_tagger = trainer.train(train_sents)
        # Example from https://github.com/nltk/nltk/issues/769
        result = brill_tagger.tag("This is a foo bar sentence".split())
        expected = [
            ("This", "DT"),
            ("is", "VBZ"),
            ("a", "DT"),
            ("foo", None),
            ("bar", "NN"),
            ("sentence", None),
        ]
        self.assertEqual(result, expected)

    @unittest.skip("Should be tested in __main__ of nltk.tbl.demo")
    def test_brill_demo(self):
        demo()
lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py
ADDED
@@ -0,0 +1,39 @@
import unittest

import pytest

from nltk import ConditionalFreqDist, tokenize


class TestEmptyCondFreq(unittest.TestCase):
    def test_tabulate(self):
        empty = ConditionalFreqDist()
        self.assertEqual(empty.conditions(), [])
        with pytest.raises(ValueError):
            empty.tabulate(conditions="BUG")  # nonexistent keys shouldn't be added
        self.assertEqual(empty.conditions(), [])

    def test_plot(self):
        empty = ConditionalFreqDist()
        self.assertEqual(empty.conditions(), [])
        empty.plot(conditions=["BUG"])  # nonexistent keys shouldn't be added
        self.assertEqual(empty.conditions(), [])

    def test_increment(self):
        # make sure that we can still mutate cfd normally
        text = "cow cat mouse cat tiger"
        cfd = ConditionalFreqDist()

        # create cfd with word length as condition
        for word in tokenize.word_tokenize(text):
            condition = len(word)
            cfd[condition][word] += 1

        self.assertEqual(cfd.conditions(), [3, 5])

        # incrementing previously unseen key is still possible
        cfd[2]["hi"] += 1
        self.assertCountEqual(cfd.conditions(), [3, 5, 2])  # new condition added
        self.assertEqual(
            cfd[2]["hi"], 1
        )  # key's frequency incremented from 0 (unseen) to 1
lib/python3.10/site-packages/nltk/test/unit/test_chunk.py
ADDED
@@ -0,0 +1,85 @@
import unittest

from nltk import RegexpParser


class TestChunkRule(unittest.TestCase):
    def test_tag_pattern2re_pattern_quantifier(self):
        """Test for bug https://github.com/nltk/nltk/issues/1597

        Ensures that curly bracket quantifiers can be used inside a chunk rule.
        This type of quantifier has been used for the supplementary example
        in https://www.nltk.org/book/ch07.html#exploring-text-corpora.
        """
        sent = [
            ("The", "AT"),
            ("September-October", "NP"),
            ("term", "NN"),
            ("jury", "NN"),
            ("had", "HVD"),
            ("been", "BEN"),
            ("charged", "VBN"),
            ("by", "IN"),
            ("Fulton", "NP-TL"),
            ("Superior", "JJ-TL"),
            ("Court", "NN-TL"),
            ("Judge", "NN-TL"),
            ("Durwood", "NP"),
            ("Pye", "NP"),
            ("to", "TO"),
            ("investigate", "VB"),
            ("reports", "NNS"),
            ("of", "IN"),
            ("possible", "JJ"),
            ("``", "``"),
            ("irregularities", "NNS"),
            ("''", "''"),
            ("in", "IN"),
            ("the", "AT"),
            ("hard-fought", "JJ"),
            ("primary", "NN"),
            ("which", "WDT"),
            ("was", "BEDZ"),
            ("won", "VBN"),
            ("by", "IN"),
            ("Mayor-nominate", "NN-TL"),
            ("Ivan", "NP"),
            ("Allen", "NP"),
            ("Jr.", "NP"),
            (".", "."),
        ]  # source: brown corpus
        cp = RegexpParser("CHUNK: {<N.*>{4,}}")
        tree = cp.parse(sent)
        assert (
            tree.pformat()
            == """(S
  The/AT
  September-October/NP
  term/NN
  jury/NN
  had/HVD
  been/BEN
  charged/VBN
  by/IN
  Fulton/NP-TL
  Superior/JJ-TL
  (CHUNK Court/NN-TL Judge/NN-TL Durwood/NP Pye/NP)
  to/TO
  investigate/VB
  reports/NNS
  of/IN
  possible/JJ
  ``/``
  irregularities/NNS
  ''/''
  in/IN
  the/AT
  hard-fought/JJ
  primary/NN
  which/WDT
  was/BEDZ
  won/VBN
  by/IN
  (CHUNK Mayor-nominate/NN-TL Ivan/NP Allen/NP Jr./NP)
  ./.)"""
        )
lib/python3.10/site-packages/nltk/test/unit/test_classify.py
ADDED
@@ -0,0 +1,49 @@
"""
Unit tests for nltk.classify. See also: nltk/test/classify.doctest
"""
import pytest

from nltk import classify

TRAIN = [
    (dict(a=1, b=1, c=1), "y"),
    (dict(a=1, b=1, c=1), "x"),
    (dict(a=1, b=1, c=0), "y"),
    (dict(a=0, b=1, c=1), "x"),
    (dict(a=0, b=1, c=1), "y"),
    (dict(a=0, b=0, c=1), "y"),
    (dict(a=0, b=1, c=0), "x"),
    (dict(a=0, b=0, c=0), "x"),
    (dict(a=0, b=1, c=1), "y"),
]

TEST = [
    (dict(a=1, b=0, c=1)),  # unseen
    (dict(a=1, b=0, c=0)),  # unseen
    (dict(a=0, b=1, c=1)),  # seen 3 times, labels=y,y,x
    (dict(a=0, b=1, c=0)),  # seen 1 time, label=x
]

RESULTS = [(0.16, 0.84), (0.46, 0.54), (0.41, 0.59), (0.76, 0.24)]


def assert_classifier_correct(algorithm):
    try:
        classifier = classify.MaxentClassifier.train(
            TRAIN, algorithm, trace=0, max_iter=1000
        )
    except (LookupError, AttributeError) as e:
        pytest.skip(str(e))

    for (px, py), featureset in zip(RESULTS, TEST):
        pdist = classifier.prob_classify(featureset)
        assert abs(pdist.prob("x") - px) < 1e-2, (pdist.prob("x"), px)
        assert abs(pdist.prob("y") - py) < 1e-2, (pdist.prob("y"), py)


def test_megam():
    assert_classifier_correct("MEGAM")


def test_tadm():
    assert_classifier_correct("TADM")
lib/python3.10/site-packages/nltk/test/unit/test_collocations.py
ADDED
@@ -0,0 +1,120 @@
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures

## Test bigram counters with discontinuous bigrams and repeated words

_EPSILON = 1e-8
SENT = "this this is is a a test test".split()


def close_enough(x, y):
    """Verify that two sequences of n-gram association values are within
    _EPSILON of each other.
    """

    return all(abs(x1[1] - y1[1]) <= _EPSILON for x1, y1 in zip(x, y))


def test_bigram2():
    b = BigramCollocationFinder.from_words(SENT)

    assert sorted(b.ngram_fd.items()) == [
        (("a", "a"), 1),
        (("a", "test"), 1),
        (("is", "a"), 1),
        (("is", "is"), 1),
        (("test", "test"), 1),
        (("this", "is"), 1),
        (("this", "this"), 1),
    ]
    assert sorted(b.word_fd.items()) == [("a", 2), ("is", 2), ("test", 2), ("this", 2)]

    assert len(SENT) == sum(b.word_fd.values()) == sum(b.ngram_fd.values()) + 1
    assert close_enough(
        sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
        [
            (("a", "a"), 1.0),
            (("a", "test"), 1.0),
            (("is", "a"), 1.0),
            (("is", "is"), 1.0),
            (("test", "test"), 1.0),
            (("this", "is"), 1.0),
            (("this", "this"), 1.0),
        ],
    )


def test_bigram3():
    b = BigramCollocationFinder.from_words(SENT, window_size=3)
    assert sorted(b.ngram_fd.items()) == sorted(
        [
            (("a", "test"), 3),
            (("is", "a"), 3),
            (("this", "is"), 3),
            (("a", "a"), 1),
            (("is", "is"), 1),
            (("test", "test"), 1),
            (("this", "this"), 1),
        ]
    )

    assert sorted(b.word_fd.items()) == sorted(
        [("a", 2), ("is", 2), ("test", 2), ("this", 2)]
    )

    assert (
        len(SENT) == sum(b.word_fd.values()) == (sum(b.ngram_fd.values()) + 2 + 1) / 2.0
    )
    assert close_enough(
        sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
        sorted(
            [
                (("a", "test"), 1.584962500721156),
                (("is", "a"), 1.584962500721156),
                (("this", "is"), 1.584962500721156),
                (("a", "a"), 0.0),
                (("is", "is"), 0.0),
                (("test", "test"), 0.0),
                (("this", "this"), 0.0),
            ]
        ),
    )


def test_bigram5():
    b = BigramCollocationFinder.from_words(SENT, window_size=5)
    assert sorted(b.ngram_fd.items()) == sorted(
        [
            (("a", "test"), 4),
            (("is", "a"), 4),
            (("this", "is"), 4),
            (("is", "test"), 3),
            (("this", "a"), 3),
            (("a", "a"), 1),
            (("is", "is"), 1),
            (("test", "test"), 1),
            (("this", "this"), 1),
        ]
    )
    assert sorted(b.word_fd.items()) == sorted(
        [("a", 2), ("is", 2), ("test", 2), ("this", 2)]
    )
    n_word_fd = sum(b.word_fd.values())
    n_ngram_fd = (sum(b.ngram_fd.values()) + 4 + 3 + 2 + 1) / 4.0
    assert len(SENT) == n_word_fd == n_ngram_fd
    assert close_enough(
        sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
        sorted(
            [
                (("a", "test"), 1.0),
                (("is", "a"), 1.0),
                (("this", "is"), 1.0),
                (("is", "test"), 0.5849625007211562),
                (("this", "a"), 0.5849625007211562),
                (("a", "a"), -1.0),
                (("is", "is"), -1.0),
                (("test", "test"), -1.0),
                (("this", "this"), -1.0),
            ]
        ),
    )
lib/python3.10/site-packages/nltk/test/unit/test_corpora.py
ADDED
@@ -0,0 +1,274 @@
import unittest

import pytest

from nltk.corpus import (  # mwa_ppdb
    cess_cat,
    cess_esp,
    conll2007,
    floresta,
    indian,
    ptb,
    sinica_treebank,
    udhr,
)
from nltk.tree import Tree


class TestUdhr(unittest.TestCase):
    def test_words(self):
        for name in udhr.fileids():
            words = list(udhr.words(name))
            self.assertTrue(words)

    def test_raw_unicode(self):
        for name in udhr.fileids():
            txt = udhr.raw(name)
            assert not isinstance(txt, bytes), name

    def test_polish_encoding(self):
        text_pl = udhr.raw("Polish-Latin2")[:164]
        text_ppl = udhr.raw("Polish_Polski-Latin2")[:164]
        expected = """POWSZECHNA DEKLARACJA PRAW CZŁOWIEKA
[Preamble]
Trzecia Sesja Ogólnego Zgromadzenia ONZ, obradująca w Paryżu, \
uchwaliła 10 grudnia 1948 roku jednomyślnie Powszechną"""
        assert text_pl == expected, "Polish-Latin2"
        assert text_ppl == expected, "Polish_Polski-Latin2"


class TestIndian(unittest.TestCase):
    def test_words(self):
        words = indian.words()[:3]
        self.assertEqual(words, ["মহিষের", "সন্তান", ":"])

    def test_tagged_words(self):
        tagged_words = indian.tagged_words()[:3]
        self.assertEqual(
            tagged_words, [("মহিষের", "NN"), ("সন্তান", "NN"), (":", "SYM")]
        )


class TestCess(unittest.TestCase):
    def test_catalan(self):
        words = cess_cat.words()[:15]
        txt = "El Tribunal_Suprem -Fpa- TS -Fpt- ha confirmat la condemna a quatre anys d' inhabilitació especial"
        self.assertEqual(words, txt.split())
        self.assertEqual(cess_cat.tagged_sents()[0][34][0], "càrrecs")

    def test_esp(self):
        words = cess_esp.words()[:15]
        txt = "El grupo estatal Electricité_de_France -Fpa- EDF -Fpt- anunció hoy , jueves , la compra del"
        self.assertEqual(words, txt.split())
        self.assertEqual(cess_esp.words()[115], "años")


class TestFloresta(unittest.TestCase):
    def test_words(self):
        words = floresta.words()[:10]
        txt = "Um revivalismo refrescante O 7_e_Meio é um ex-libris de a"
        self.assertEqual(words, txt.split())


class TestSinicaTreebank(unittest.TestCase):
    def test_sents(self):
        first_3_sents = sinica_treebank.sents()[:3]
        self.assertEqual(
            first_3_sents, [["一"], ["友情"], ["嘉珍", "和", "我", "住在", "同一條", "巷子"]]
        )

    def test_parsed_sents(self):
        parsed_sents = sinica_treebank.parsed_sents()[25]
        self.assertEqual(
            parsed_sents,
            Tree(
                "S",
                [
                    Tree("NP", [Tree("Nba", ["嘉珍"])]),
                    Tree("V‧地", [Tree("VA11", ["不停"]), Tree("DE", ["的"])]),
                    Tree("VA4", ["哭泣"]),
                ],
            ),
        )


class TestCoNLL2007(unittest.TestCase):
    # Reading the CoNLL 2007 Dependency Treebanks

    def test_sents(self):
        sents = conll2007.sents("esp.train")[0]
        self.assertEqual(
            sents[:6], ["El", "aumento", "del", "índice", "de", "desempleo"]
        )

    def test_parsed_sents(self):

        parsed_sents = conll2007.parsed_sents("esp.train")[0]

        self.assertEqual(
            parsed_sents.tree(),
            Tree(
                "fortaleció",
                [
                    Tree(
                        "aumento",
                        [
                            "El",
                            Tree(
                                "del",
                                [
                                    Tree(
                                        "índice",
                                        [
                                            Tree(
                                                "de",
                                                [Tree("desempleo", ["estadounidense"])],
                                            )
                                        ],
                                    )
                                ],
                            ),
                        ],
                    ),
                    "hoy",
                    "considerablemente",
                    Tree(
                        "al",
                        [
                            Tree(
                                "euro",
                                [
                                    Tree(
                                        "cotizaba",
                                        [
                                            ",",
                                            "que",
                                            Tree("a", [Tree("15.35", ["las", "GMT"])]),
                                            "se",
                                            Tree(
                                                "en",
                                                [
                                                    Tree(
                                                        "mercado",
                                                        [
                                                            "el",
                                                            Tree("de", ["divisas"]),
                                                            Tree("de", ["Fráncfort"]),
                                                        ],
                                                    )
                                                ],
                                            ),
                                            Tree("a", ["0,9452_dólares"]),
                                            Tree(
                                                "frente_a",
                                                [
                                                    ",",
                                                    Tree(
                                                        "0,9349_dólares",
                                                        [
                                                            "los",
                                                            Tree(
                                                                "de",
                                                                [
                                                                    Tree(
                                                                        "mañana",
                                                                        ["esta"],
                                                                    )
                                                                ],
                                                            ),
                                                        ],
                                                    ),
                                                ],
                                            ),
                                        ],
                                    )
                                ],
                            )
                        ],
                    ),
                    ".",
                ],
            ),
        )


@pytest.mark.skipif(
    not ptb.fileids(),
    reason="A full installation of the Penn Treebank is not available",
)
class TestPTB(unittest.TestCase):
    def test_fileids(self):
        self.assertEqual(
            ptb.fileids()[:4],
            [
                "BROWN/CF/CF01.MRG",
                "BROWN/CF/CF02.MRG",
                "BROWN/CF/CF03.MRG",
                "BROWN/CF/CF04.MRG",
            ],
        )

    def test_words(self):
        self.assertEqual(
            ptb.words("WSJ/00/WSJ_0003.MRG")[:7],
            ["A", "form", "of", "asbestos", "once", "used", "*"],
        )

    def test_tagged_words(self):
        self.assertEqual(
            ptb.tagged_words("WSJ/00/WSJ_0003.MRG")[:3],
            [("A", "DT"), ("form", "NN"), ("of", "IN")],
        )

    def test_categories(self):
        self.assertEqual(
            ptb.categories(),
            [
                "adventure",
                "belles_lettres",
                "fiction",
                "humor",
                "lore",
                "mystery",
                "news",
                "romance",
                "science_fiction",
            ],
        )

    def test_news_fileids(self):
        self.assertEqual(
            ptb.fileids("news")[:3],
            ["WSJ/00/WSJ_0001.MRG", "WSJ/00/WSJ_0002.MRG", "WSJ/00/WSJ_0003.MRG"],
        )

    def test_category_words(self):
        self.assertEqual(
            ptb.words(categories=["humor", "fiction"])[:6],
            ["Thirty-three", "Scotty", "did", "not", "go", "back"],
        )


@pytest.mark.skip("Skipping test for mwa_ppdb.")
class TestMWAPPDB(unittest.TestCase):
    def test_fileids(self):
        self.assertEqual(
            mwa_ppdb.fileids(), ["ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs"]
        )

    def test_entries(self):
        self.assertEqual(
            mwa_ppdb.entries()[:10],
            [
                ("10/17/01", "17/10/2001"),
                ("102,70", "102.70"),
                ("13,53", "13.53"),
                ("3.2.5.3.2.1", "3.2.5.3.2.1."),
                ("53,76", "53.76"),
                ("6.9.5", "6.9.5."),
                ("7.7.6.3", "7.7.6.3."),
                ("76,20", "76.20"),
                ("79,85", "79.85"),
                ("93,65", "93.65"),
            ],
        )
lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py
ADDED
@@ -0,0 +1,48 @@
"""
Corpus View Regression Tests
"""
import unittest

import nltk.data
from nltk.corpus.reader.util import (
    StreamBackedCorpusView,
    read_line_block,
    read_whitespace_block,
)


class TestCorpusViews(unittest.TestCase):

    linetok = nltk.LineTokenizer(blanklines="keep")
    names = [
        "corpora/inaugural/README",  # A very short file (160 chars)
        "corpora/inaugural/1793-Washington.txt",  # A relatively short file (791 chars)
        "corpora/inaugural/1909-Taft.txt",  # A longer file (32k chars)
    ]

    def data(self):
        for name in self.names:
            f = nltk.data.find(name)
            with f.open() as fp:
                file_data = fp.read().decode("utf8")
            yield f, file_data

    def test_correct_values(self):
        # Check that corpus views produce the correct sequence of values.

        for f, file_data in self.data():
            v = StreamBackedCorpusView(f, read_whitespace_block)
            self.assertEqual(list(v), file_data.split())

            v = StreamBackedCorpusView(f, read_line_block)
            self.assertEqual(list(v), self.linetok.tokenize(file_data))

    def test_correct_length(self):
        # Check that the corpus views report the correct lengths:

        for f, file_data in self.data():
            v = StreamBackedCorpusView(f, read_whitespace_block)
            self.assertEqual(len(v), len(file_data.split()))

            v = StreamBackedCorpusView(f, read_line_block)
            self.assertEqual(len(v), len(self.linetok.tokenize(file_data)))
lib/python3.10/site-packages/nltk/test/unit/test_data.py
ADDED
@@ -0,0 +1,15 @@
import pytest

import nltk.data


def test_find_raises_exception():
    with pytest.raises(LookupError):
        nltk.data.find("no_such_resource/foo")


def test_find_raises_exception_with_full_resource_name():
    no_such_thing = "no_such_thing/bar"
    with pytest.raises(LookupError) as exc:
        nltk.data.find(no_such_thing)
    assert no_such_thing in str(exc)
lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py
ADDED
@@ -0,0 +1,144 @@
+import unittest
+
+from nltk.metrics.agreement import AnnotationTask
+
+
+class TestDisagreement(unittest.TestCase):
+
+    """
+    Class containing unit tests for nltk.metrics.agreement.Disagreement.
+    """
+
+    def test_easy(self):
+        """
+        Simple test, based on
+        https://github.com/foolswood/krippendorffs_alpha/raw/master/krippendorff.pdf.
+        """
+        data = [
+            ("coder1", "dress1", "YES"),
+            ("coder2", "dress1", "NO"),
+            ("coder3", "dress1", "NO"),
+            ("coder1", "dress2", "YES"),
+            ("coder2", "dress2", "NO"),
+            ("coder3", "dress3", "NO"),
+        ]
+        annotation_task = AnnotationTask(data)
+        self.assertAlmostEqual(annotation_task.alpha(), -0.3333333)
+
+    def test_easy2(self):
+        """
+        Same simple test with 1 rating removed.
+        Removal of that rating should not matter: K-Alpha ignores items with
+        only 1 rating.
+        """
+        data = [
+            ("coder1", "dress1", "YES"),
+            ("coder2", "dress1", "NO"),
+            ("coder3", "dress1", "NO"),
+            ("coder1", "dress2", "YES"),
+            ("coder2", "dress2", "NO"),
+        ]
+        annotation_task = AnnotationTask(data)
+        self.assertAlmostEqual(annotation_task.alpha(), -0.3333333)
+
+    def test_advanced(self):
+        """
+        More advanced test, based on
+        http://www.agreestat.com/research_papers/onkrippendorffalpha.pdf
+        """
+        data = [
+            ("A", "1", "1"),
+            ("B", "1", "1"),
+            ("D", "1", "1"),
+            ("A", "2", "2"),
+            ("B", "2", "2"),
+            ("C", "2", "3"),
+            ("D", "2", "2"),
+            ("A", "3", "3"),
+            ("B", "3", "3"),
+            ("C", "3", "3"),
+            ("D", "3", "3"),
+            ("A", "4", "3"),
+            ("B", "4", "3"),
+            ("C", "4", "3"),
+            ("D", "4", "3"),
+            ("A", "5", "2"),
+            ("B", "5", "2"),
+            ("C", "5", "2"),
+            ("D", "5", "2"),
+            ("A", "6", "1"),
+            ("B", "6", "2"),
+            ("C", "6", "3"),
+            ("D", "6", "4"),
+            ("A", "7", "4"),
+            ("B", "7", "4"),
+            ("C", "7", "4"),
+            ("D", "7", "4"),
+            ("A", "8", "1"),
+            ("B", "8", "1"),
+            ("C", "8", "2"),
+            ("D", "8", "1"),
+            ("A", "9", "2"),
+            ("B", "9", "2"),
+            ("C", "9", "2"),
+            ("D", "9", "2"),
+            ("B", "10", "5"),
+            ("C", "10", "5"),
+            ("D", "10", "5"),
+            ("C", "11", "1"),
+            ("D", "11", "1"),
+            ("C", "12", "3"),
+        ]
+        annotation_task = AnnotationTask(data)
+        self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632)
+
+    def test_advanced2(self):
+        """
+        Same more advanced example, but with 1 rating removed.
+        Again, removal of that 1 rating should not matter.
+        """
+        data = [
+            ("A", "1", "1"),
+            ("B", "1", "1"),
+            ("D", "1", "1"),
+            ("A", "2", "2"),
+            ("B", "2", "2"),
+            ("C", "2", "3"),
+            ("D", "2", "2"),
+            ("A", "3", "3"),
+            ("B", "3", "3"),
+            ("C", "3", "3"),
+            ("D", "3", "3"),
+            ("A", "4", "3"),
+            ("B", "4", "3"),
+            ("C", "4", "3"),
+            ("D", "4", "3"),
+            ("A", "5", "2"),
+            ("B", "5", "2"),
+            ("C", "5", "2"),
+            ("D", "5", "2"),
+            ("A", "6", "1"),
+            ("B", "6", "2"),
+            ("C", "6", "3"),
+            ("D", "6", "4"),
+            ("A", "7", "4"),
+            ("B", "7", "4"),
+            ("C", "7", "4"),
+            ("D", "7", "4"),
+            ("A", "8", "1"),
+            ("B", "8", "1"),
+            ("C", "8", "2"),
+            ("D", "8", "1"),
+            ("A", "9", "2"),
+            ("B", "9", "2"),
+            ("C", "9", "2"),
+            ("D", "9", "2"),
+            ("B", "10", "5"),
+            ("C", "10", "5"),
+            ("D", "10", "5"),
+            ("C", "11", "1"),
+            ("D", "11", "1"),
+            ("C", "12", "3"),
+        ]
+        annotation_task = AnnotationTask(data)
+        self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632)
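For reference, the alpha() calls above compute Krippendorff's alpha over (coder, item, label) triples. A minimal sketch with hypothetical coders and items, not taken from the test data:

from nltk.metrics.agreement import AnnotationTask

triples = [
    ("c1", "item1", "YES"),
    ("c2", "item1", "YES"),
    ("c1", "item2", "YES"),
    ("c2", "item2", "NO"),
]
task = AnnotationTask(triples)
# 1.0 means perfect agreement, 0.0 chance-level; negative values
# (as in test_easy) indicate systematic disagreement.
print(task.alpha())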
lib/python3.10/site-packages/nltk/test/unit/test_distance.py
ADDED
@@ -0,0 +1,129 @@
+from typing import Tuple
+
+import pytest
+
+from nltk.metrics.distance import edit_distance
+
+
+class TestEditDistance:
+    @pytest.mark.parametrize(
+        "left,right,substitution_cost,expecteds",
+        [
+            # Allowing transpositions reduces the number of edits required.
+            # with transpositions:
+            # e.g. "abc" -D-> "ac" -T-> "ca": 2 steps
+            #
+            # without transpositions:
+            # e.g. "abc" -D-> "ab" -D-> "a" -I-> "ca": 3 steps
+            ("abc", "ca", 1, (2, 3)),
+            ("abc", "ca", 5, (2, 3)),  # Doesn't *require* substitutions
+            # Note: a substitution_cost higher than 2 doesn't make much
+            # sense, as a deletion + insertion is identical, and always
+            # costs 2.
+            #
+            #
+            # Transpositions don't always reduce the number of edits required:
+            # with or without transpositions:
+            # e.g. "wants" -D-> "wats" -D-> "was" -I-> "wasp": 3 steps
+            ("wants", "wasp", 1, (3, 3)),
+            ("wants", "wasp", 5, (3, 3)),  # Doesn't *require* substitutions
+            #
+            #
+            # Ought to have the same results with and without transpositions
+            # with or without transpositions:
+            # e.g. "rain" -S-> "sain" -S-> "shin" -I-> "shine": 3 steps
+            # (but cost 5 if substitution_cost=2)
+            ("rain", "shine", 1, (3, 3)),
+            ("rain", "shine", 2, (5, 5)),  # Does *require* substitutions
+            #
+            #
+            # Several potentially interesting typos
+            # with transpositions:
+            # e.g. "acbdef" -T-> "abcdef": 1 step
+            #
+            # without transpositions:
+            # e.g. "acbdef" -D-> "abdef" -I-> "abcdef": 2 steps
+            ("acbdef", "abcdef", 1, (1, 2)),
+            ("acbdef", "abcdef", 2, (1, 2)),  # Doesn't *require* substitutions
+            #
+            #
+            # with transpositions:
+            # e.g. "lnaguaeg" -T-> "languaeg" -T-> "language": 2 steps
+            #
+            # without transpositions:
+            # e.g. "lnaguaeg" -D-> "laguaeg" -I-> "languaeg" -D-> "languag" -I-> "language": 4 steps
+            ("lnaguaeg", "language", 1, (2, 4)),
+            ("lnaguaeg", "language", 2, (2, 4)),  # Doesn't *require* substitutions
+            #
+            #
+            # with transpositions:
+            # e.g. "lnaugage" -T-> "lanugage" -T-> "language": 2 steps
+            #
+            # without transpositions:
+            # e.g. "lnaugage" -S-> "lnangage" -D-> "langage" -I-> "language": 3 steps
+            # (but one substitution, so a cost of 4 if substitution_cost = 2)
+            ("lnaugage", "language", 1, (2, 3)),
+            ("lnaugage", "language", 2, (2, 4)),
+            # Does *require* substitutions if no transpositions
+            #
+            #
+            # with transpositions:
+            # e.g. "lngauage" -T-> "lnaguage" -T-> "language": 2 steps
+            # without transpositions:
+            # e.g. "lngauage" -I-> "langauage" -D-> "language": 2 steps
+            ("lngauage", "language", 1, (2, 2)),
+            ("lngauage", "language", 2, (2, 2)),  # Doesn't *require* substitutions
+            #
+            #
+            # with or without transpositions:
+            # e.g. "wants" -S-> "sants" -S-> "swnts" -S-> "swits" -S-> "swims" -D-> "swim": 5 steps
+            #
+            # with substitution_cost=2 and transpositions:
+            # e.g. "wants" -T-> "santw" -D-> "sntw" -D-> "stw" -D-> "sw"
+            #      -I-> "swi" -I-> "swim": 6 steps
+            #
+            # with substitution_cost=2 and no transpositions:
+            # e.g. "wants" -I-> "swants" -D-> "swant" -D-> "swan" -D-> "swa" -D-> "sw"
+            #      -I-> "swi" -I-> "swim": 7 steps
+            ("wants", "swim", 1, (5, 5)),
+            ("wants", "swim", 2, (6, 7)),
+            #
+            #
+            # with or without transpositions:
+            # e.g. "kitten" -S-> "sitten" -S-> "sittin" -I-> "sitting": 3 steps
+            # (but cost 5 if substitution_cost=2)
+            ("kitten", "sitting", 1, (3, 3)),
+            ("kitten", "sitting", 2, (5, 5)),
+            #
+            # duplicated letter
+            # e.g. "duuplicated" -D-> "duplicated"
+            ("duplicated", "duuplicated", 1, (1, 1)),
+            ("duplicated", "duuplicated", 2, (1, 1)),
+            ("very duplicated", "very duuplicateed", 2, (2, 2)),
+        ],
+    )
+    def test_with_transpositions(
+        self, left: str, right: str, substitution_cost: int, expecteds: Tuple[int, int]
+    ):
+        """
+        Test `edit_distance` between two strings, given some `substitution_cost`,
+        and whether transpositions are allowed.
+
+        :param str left: First input string to `edit_distance`.
+        :param str right: Second input string to `edit_distance`.
+        :param int substitution_cost: The cost of a substitution action in `edit_distance`.
+        :param Tuple[int, int] expecteds: A tuple of expected outputs, such that `expecteds[0]` is
+            the expected output with `transpositions=True`, and `expecteds[1]` is
+            the expected output with `transpositions=False`.
+        """
+        # Test the input strings in both orderings
+        for s1, s2 in ((left, right), (right, left)):
+            # zip with [True, False] to get the transpositions value
+            for expected, transpositions in zip(expecteds, [True, False]):
+                predicted = edit_distance(
+                    s1,
+                    s2,
+                    substitution_cost=substitution_cost,
+                    transpositions=transpositions,
+                )
+                assert predicted == expected
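The parametrization above pins down how the transpositions and substitution_cost flags interact. A couple of standalone calls that mirror rows of that table:

from nltk.metrics.distance import edit_distance

# An adjacent-swap typo costs 1 edit with transpositions, 2 without.
assert edit_distance("acbdef", "abcdef", transpositions=True) == 1
assert edit_distance("acbdef", "abcdef", transpositions=False) == 2

# With substitution_cost=2, substituting is no cheaper than delete + insert.
assert edit_distance("rain", "shine", substitution_cost=2) == 5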
lib/python3.10/site-packages/nltk/test/unit/test_downloader.py
ADDED
@@ -0,0 +1,19 @@
+from nltk import download
+
+
+def test_downloader_using_existing_parent_download_dir(tmp_path):
+    """Test that download works properly when the parent folder of the download_dir exists"""
+
+    download_dir = str(tmp_path.joinpath("another_dir"))
+    download_status = download("mwa_ppdb", download_dir)
+    assert download_status is True
+
+
+def test_downloader_using_non_existing_parent_download_dir(tmp_path):
+    """Test that download works properly when the parent folder of the download_dir does not exist"""
+
+    download_dir = str(
+        tmp_path.joinpath("non-existing-parent-folder", "another-non-existing-folder")
+    )
+    download_status = download("mwa_ppdb", download_dir)
+    assert download_status is True
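Both tests exercise the same call shape, where the second positional argument is the download directory. A sketch of the underlying usage; network access and the small "mwa_ppdb" package are assumptions carried over from the tests:

from nltk import download

# download() creates the target directory (and missing parents) as needed
# and returns True once the package has been installed there.
ok = download("mwa_ppdb", download_dir="/tmp/nltk_data_demo", quiet=True)
assert ok is True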
lib/python3.10/site-packages/nltk/test/unit/test_freqdist.py
ADDED
@@ -0,0 +1,7 @@
+import nltk
+
+
+def test_iterating_returns_an_iterator_ordered_by_frequency():
+    samples = ["one", "two", "two"]
+    distribution = nltk.FreqDist(samples)
+    assert list(distribution) == ["two", "one"]
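The assertion holds because iterating a FreqDist yields samples from most to least frequent, the same ordering most_common() exposes with counts attached:

import nltk

fd = nltk.FreqDist(["one", "two", "two"])
assert fd.most_common() == [("two", 2), ("one", 1)]
assert list(fd) == [sample for sample, _ in fd.most_common()]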