text
stringlengths 22
301k
|
---|
# Match if the element is in a match list. Match is used only with _xWILD, and succeeds only if one of the list names matches a node
@RULES
_james <- _xWILD [match=(jim
jimmy james) singlet min=1 max=1] @@ |
@NODES _ROOT
@RULES
_columnHeaders <-
Class ### (1)
_xWILD [plus fails=(\n \r)]
_xWILD [one matches=(\n \r)]
@@
|
@DECL
###############################################
# General functions
###############################################
# Find the child concept of L("concept") named L("name"), creating it
# if it does not already exist. Returns the (possibly new) concept.
AddUniqueCon(L("concept"),L("name")) {
L("con") = findconcept(L("concept"),L("name"));
if (!L("con")) L("con") = makeconcept(L("concept"),L("name"));
return L("con");
}
# Add string L("value") to attribute L("attr") of L("concept") only if
# that exact string is not already among the attribute's values.
# Returns 1 if added, 0 if the value was empty or a duplicate.
AddUniqueStr(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
# Scan existing values for an exact string match.
while (L("val")) {
L("str") = getstrval(L("val"));
if (L("str") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addstrval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Add number L("value") to attribute L("attr") of L("concept") only if
# that number is not already among the attribute's values.
# Returns 1 if added, 0 if the value was zero/empty or a duplicate.
AddUniqueNum(L("concept"),L("attr"),L("value")) {
if (L("value")) {
L("val") = AttrValues(L("concept"),L("attr"));
# Scan existing values for an exact numeric match.
while (L("val")) {
L("num") = getnumval(L("val"));
if (L("num") == L("value"))
return 0;
L("val") = nextval(L("val"));
}
addnumval(L("concept"),L("attr"),L("value"));
return 1;
}
return 0;
}
# Add concept L("value") to attribute L("attr") of L("concept") unless a
# concept with the same path is already present (paths are compared, not
# concept handles). Returns 1 if added, 0 on duplicate.
# Writes trace output to "unique.txt".
# NOTE(review): the trace line emits L("attr") twice -- looks accidental;
# confirm the intended format before changing it.
AddUniqueConVal(L("concept"),L("attr"),L("value")) {
"unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n";
L("val") = AttrValues(L("concept"),L("attr"));
while (L("val")) {
L("con") = getconval(L("val"));
"unique.txt" << conceptname(L("con")) << "\n";
if (conceptpath(L("con")) == conceptpath(L("value")))
return 0;
L("val") = nextval(L("val"));
}
addconval(L("concept"),L("attr"),L("value"));
return 1;
}
# Resolve (building as needed) a chain of child concepts under
# L("parent") from the space-separated names in L("hier").
# Double quotes are stripped from each token, and the literal token
# "concept" is skipped. Returns the deepest concept reached.
PathToConcept(L("parent"),L("hier")) {
L("cons") = split(L("hier")," ");
L("i") = 0;
L("con") = L("parent");
while (L("cons")[L("i")]) {
L("c") = L("cons")[L("i")];
# Remove surrounding quotes from the token.
L("name") = strsubst(L("c"),"\"",0);
if (L("name") != "concept")
L("con") = AddUniqueCon(L("con"),L("name"));
L("i")++;
}
return L("con");
}
# Copy string attribute L("attr") from concept L("from") to L("to"),
# but only when the source has a value and the target does not.
CopyAttr(L("from"),L("to"),L("attr")) {
L("from value") = strval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr"),L("from value"));
}
}
# Like CopyAttr, but the source and target attribute names may differ:
# copies string attribute L("attr from") on L("from") into
# L("attr to") on L("to"), only if the target attribute is unset.
CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) {
L("from value") = strval(L("from"),L("attr from"));
if (L("from value")) {
L("to value") = strval(L("to"),L("attr to"));
if (L("from value") && !L("to value"))
addstrval(L("to"),L("attr to"),L("from value"));
}
}
# Copy concept-valued attribute L("attr") from L("from") to L("to"),
# but only when the source has a value and the target does not.
CopyConAttr(L("from"),L("to"),L("attr")) {
L("from value") = conval(L("from"),L("attr"));
if (L("from value")) {
L("to value") = conval(L("to"),L("attr"));
if (L("from value") && !L("to value"))
addconval(L("to"),L("attr"),L("from value"));
}
}
# Copy every attribute (all values) from concept L("from") to L("to").
# Attribute types: 1 = number, 2 = concept, 3 = float, else string.
# NOTE(review): float values (type 3) are stored with addnumval, which
# may truncate -- confirm whether a float store was intended.
CopyConAttrs(L("from"),L("to")) {
L("attrs") = findattrs(L("from"));
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
L("name") = attrname(L("attrs"));
L("type") = attrtype(L("from"),L("name"));
# Copy each value according to the attribute's type.
while (L("vals")) {
if (L("type") == 1) {
addnumval(L("to"),L("name"),getnumval(L("vals")));
} else if (L("type") == 2) {
addconval(L("to"),L("name"),getconval(L("vals")));
} else if (L("type") == 3) {
addnumval(L("to"),L("name"),getfltval(L("vals")));
} else {
addstrval(L("to"),L("name"),getstrval(L("vals")));
}
L("vals") = nextval(L("vals"));
}
L("attrs") = nextattr(L("attrs"));
}
}
# Return the value list of attribute L("attr") on concept L("con"),
# or 0 if the attribute does not exist.
AttrValues(L("con"),L("attr")) {
L("at") = findattr(L("con"),L("attr"));
if (L("at"))
return attrvals(L("at"));
return 0;
}
# Count the values in the list L("vals").
# Relies on L("count") defaulting to 0 on first reference.
ValCount(L("vals")) {
while (L("vals")) {
L("count")++;
L("vals") = nextval(L("vals"));
}
return L("count");
}
# Return the last (rightmost) child of L("parent"), or 0 (uninitialized
# L("last")) when the concept has no children.
LastChild(L("parent")) {
L("child") = down(L("parent"));
while (L("child")) {
L("last") = L("child");
L("child") = next(L("child"));
}
return L("last");
}
# Create a new child of L("con") with a uniquely numbered name derived
# from L("count name") (e.g. "variation1", "variation2", ...).
MakeCountCon(L("con"),L("count name")) {
L("count name") = CountName(L("con"),L("count name"));
return makeconcept(L("con"),L("count name"));
}
# Increment numeric attribute L("countname") on L("con"), creating it
# with value 1 if absent. Returns the updated count.
IncrementCount(L("con"),L("countname")) {
L("count") = numval(L("con"),L("countname"));
if (L("count")) {
L("count") = L("count") + 1;
replaceval(L("con"),L("countname"),L("count"));
} else {
addnumval(L("con"),L("countname"),1);
L("count") = 1;
}
return L("count");
}
# Produce the next numbered name for L("root") under L("con"),
# e.g. "item" -> "item3" when the running counter reaches 3.
CountName(L("con"),L("root")) {
L("count") = IncrementCount(L("con"),L("root"));
return L("root") + str(L("count"));
}
# Strip trailing decimal digits from L("name").
# Returns 0 when the name is entirely digits, the unchanged name when it
# has no trailing digits, and the name minus its digit suffix otherwise.
# BUGFIX: the previous version chopped the final character even when it
# was not a digit (e.g. "abc" -> "ab"), because the scan window started
# at len-1 and strpiece(0,i) was returned unconditionally.
StripEndDigits(L("name")) {
if (strisdigit(L("name"))) return 0;
L("len") = strlength(L("name")) - 1;
# Nothing to strip if the last character is not a digit.
if (!strisdigit(strpiece(L("name"),L("len"),L("len"))))
return L("name");
L("i") = L("len") - 1;
L("str") = strpiece(L("name"),L("i"),L("len"));
# Widen the suffix window leftward while it is all digits.
while (strisdigit(L("str")) && L("i")) {
L("i")--;
L("str") = strpiece(L("name"),L("i"),L("len"));
}
return strpiece(L("name"),0,L("i"));
}
###############################################
# KB Dump Functions
###############################################
# Dump the KB subtree rooted at L("con") to <apppath>/kb/<file>.kb,
# logging success or failure to "kb.txt".
DumpKB(L("con"),L("file")) {
L("dir") = G("$apppath") + "/kb/";
L("filename") = L("dir") + L("file") + ".kb";
if (!kbdumptree(L("con"),L("filename"))) {
"kb.txt" << "FAILED dump: " << L("filename") << "\n";
} else {
"kb.txt" << "DUMPED: " << L("filename") << "\n";
}
}
# Load ("take") the KB file <apppath>/kb/<filename>.kb, logging the
# outcome to "kb.txt".
TakeKB(L("filename")) {
L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb";
"kb.txt" << "Taking: " << L("path") << "\n";
if (take(L("path"))) {
"kb.txt" << "  Taken successfully: " << L("path") << "\n";
} else {
"kb.txt" << "  Taken FAILED: " << L("path") << "\n";
}
}
# Count the immediate children of concept L("con").
ChildCount(L("con")) {
L("n") = 0;
L("kid") = down(L("con"));
while (L("kid")) {
L("n")++;
L("kid") = next(L("kid"));
}
return L("n");
}
###############################################
# KBB DISPLAY FUNCTIONS
###############################################
###############################################
# display type:
# 0 compact with ellipses on long attr values
# 1 full, more spread out
# 2 compact without ellipses on long attr values
###############################################
# Write the KB subtree under L("con") to <kbpath>/<name>.kbb using
# display type 2 (compact, no ellipses on long attribute values).
SaveToKB(L("con"),L("name")) {
L("filepath") = G("$kbpath") + L("name") + ".kbb";
L("file") = openfile(L("filepath"));
SaveKB(L("file"),L("con"),2);
closefile(L("file"));
}
# Serialize L("top con") and its descendants to the already-open stream
# L("file") using the given display type. Returns the top concept.
SaveKB(L("file"),L("top con"),L("display type")) {
DisplayKBRecurse(L("file"),L("top con"),0,L("display type"));
L("file") << "\n";
return L("top con");
}
# Like SaveKB, but writes to the per-pass display file ("anaNNN.kbb")
# named from the current pass number. Returns the top concept.
DisplayKB(L("top con"),L("display type")) {
L("file") = DisplayFileName();
DisplayKBRecurse(L("file"),L("top con"),0,L("display type"));
L("file") << "\n";
return L("top con");
}
# Write a banner comment containing L("text") to the per-pass display
# file.
KBHeader(L("text")) {
L("file") = DisplayFileName();
L("file") << "#######################\n";
L("file") << "# " << L("text") << "\n";
L("file") << "#######################\n\n";
}
# Build the per-pass display file name "anaNNN.kbb", zero-padding the
# current pass number to three digits.
DisplayFileName() {
if (num(G("$passnum")) < 10) {
L("file") = "ana00" + str(G("$passnum"));
}else if (num(G("$passnum")) < 100) {
L("file") = "ana0" + str(G("$passnum"));
} else {
L("file") = "ana" + str(G("$passnum"));
}
L("file") = L("file") + ".kbb";
return L("file");
}
# Recursively print concept L("parent") and its subtree to L("file"),
# indenting children by nesting level and appending each concept's
# attributes on the same line. The root name is printed only at level 0.
DisplayKBRecurse(L("file"),L("parent"),L("level"),L("display type")) {
if (L("level") == 0) {
L("file") << conceptname(L("parent")) << "\n";
}
L("con") = down(L("parent"));
while (L("con")) {
L("file") << SpacesStr(L("level")+1) << conceptname(L("con"));
DisplayAttributes(L("file"),L("con"),L("display type"),L("level"));
L("file") << "\n";
# Recurse into children one level deeper.
if (down(L("con"))) {
L("lev") = 1;
DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("display type"));
}
L("con") = next(L("con"));
}
}
# Print the attributes of L("con") to L("file") in the chosen display
# format. Display type 1 puts each attribute on its own indented line;
# other types print a comma-separated list. Type 0 truncates string
# values longer than 20 chars with "...". Multi-valued attributes and
# concept-valued attributes are bracketed.
DisplayAttributes(L("file"),L("con"),L("display type"),L("level")) {
L("attrs") = findattrs(L("con"));
if (L("attrs")) L("file") << ": ";
if (L("display type") == 1 && L("attrs")) L("file") << "\n";
L("first attr") = 1;
while (L("attrs")) {
L("vals") = attrvals(L("attrs"));
L("count") = ValCount(L("vals"));
# Separator between attributes (inline formats only).
if (L("display type") != 1 && !L("first attr")) {
L("file") << ", ";
}
if (L("display type") == 1) {
if (!L("first attr")) L("file") << "\n";
L("file") << SpacesStr(L("level")+2);
}
L("name") = attrname(L("attrs"));
L("file") << QuoteIfNeeded(L("name")) << "=";
L("first") = 1;
L("type") = attrtype(L("con"),L("name"));
while (L("vals")) {
if (!L("first"))
L("file") << ",";
else if (L("type") != 2 && L("count") > 1)
L("file") << "[";
# Emit the value according to the attribute type:
# 1 = number, 2 = concept (path, always bracketed), 3 = float,
# else string (possibly elided in display type 0).
if (L("type") == 1) {
L("num") = getnumval(L("vals"));
L("file") << str(L("num"));
} else if (L("type") == 2) {
if (L("first"))
L("file") << "[";
L("c") = getconval(L("vals"));
L("file") << conceptpath(L("c"));
} else if (L("type") == 3) {
L("flt") = getfltval(L("vals"));
L("file") << str(L("flt"));
} else {
L("val") = getstrval(L("vals"));
if (L("display type") == 0 && strlength(L("val")) > 20) {
L("shorty") = strpiece(L("val"),0,20);
L("val") = L("shorty") + "...";
}
L("file") << QuoteIfNeeded(str(L("val")));
}
L("first") = 0;
L("vals") = nextval(L("vals"));
}
if (L("type") == 2 || L("count") > 1)
L("file") << "]";
L("first attr") = 0;
L("attrs") = nextattr(L("attrs"));
}
}
# Wrap L("str") in double quotes when it contains a space or any
# punctuation; returns 0 for an empty/null input, otherwise the
# (possibly quoted) string.
QuoteIfNeeded(L("str")) {
if (!L("str"))
return 0;
L("new") = L("str");
if (strcontains(" ",L("str")) || strhaspunct(L("str")))
L("new") = "\"" + L("new") + "\"";
return L("new");
}
# Because NLP++ doesn't allow for empty strings,
# this function can only be called with "num" >= 1
# Returns a string of exactly L("num") double-space units, used for
# indentation in the KB display functions.
SpacesStr(L("num")) {
L("n") = 1;
L("spaces") = "  ";
while (L("n") < L("num")) {
L("spaces") = L("spaces") + "  ";
L("n")++;
}
return L("spaces");
}
# Left-pad L("num str") with repetitions of L("pad str") up to a total
# width of L("pad len"). Relies on L("i") defaulting to 0.
# NOTE(review): when no padding is needed L("pad") remains numeric 0 --
# confirm the engine coerces that to "" (not "0") on concatenation.
PadStr(L("num str"),L("pad str"),L("pad len")) {
L("len") = strlength(L("num str"));
L("pad") = 0;
L("to pad") = L("pad len") - L("len");
while (L("i")++ < L("to pad")) {
L("pad") = L("pad") + L("pad str");
}
L("padded") = L("pad") + L("num str");
return L("padded");
}
###############################################
# DICTIONARY FUNCTIONS
###############################################
# Open <apppath>\kb\user\attrs.kb for writing dictionary attribute
# records; the handle is kept in G("attrs") for DictionaryWord/End.
# NOTE(review): backslash separators make this path Windows-specific.
DictionaryStart() {
G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb";
G("attrs") = openfile(G("attrs path"));
}
# Emit one dictionary attribute record: add L("word") and L("attrName")
# to the dictionary, then write an "ind attr" block binding the word to
# the attribute with a typed value. L("attrType") selects the encoding:
# "str" -> quoted string, "num" -> number, "con" -> concept path.
DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) {
addword(L("word"));
addword(L("attrName"));
G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n";
G("attrs") << findwordpath(L("attrName")) << "\n";
if (L("attrType") == "str")
G("attrs") << "pst\n" << "\"" << L("value") << "\"";
else if (L("attrType") == "num")
G("attrs") << "pnum\n" << str(L("value"));
else if (L("attrType") == "con")
G("attrs") << "pcon\n" << conceptpath(L("value"));
G("attrs") << "\nend ind\n\n";
}
# Finish the dictionary file opened by DictionaryStart: write the
# closing "quit" directive and close the stream.
DictionaryEnd() {
G("attrs") << "\nquit\n\n";
closefile(G("attrs"));
}
# Copy the word concepts under L("words") into L("order") sorted by
# descending numeric "count" attribute. Works as a selection sort:
# each outer iteration picks the unchecked word with the highest count,
# copies it, and marks the source "checked". Words unknown to the
# spell checker get an "unknown" flag. A sanity counter bounds the
# outer loop at 300 iterations.
OrderByCount(L("words"),L("order")) {
L("done") = 0;
L("sanity") = 0;
while (!L("done")) {
L("done") = 1;
L("conmax") = 0;
L("max") = 0;
L("word") = down(L("words"));
# Find the unchecked word with the largest count.
while (L("word")) {
L("check") = numval(L("word"),"checked");
if (!L("check")) {
L("done") = 0;
L("count") = numval(L("word"),"count");
if (L("count") > L("max")) {
"max.txt" << conceptname(L("word")) << " " << L("count") << "\n";
L("max") = L("count");
L("conmax") = L("word");
}
}
L("word") = next(L("word"));
}
# Copy the winner into the ordered list and mark it consumed.
if (!L("done") && L("conmax")) {
L("word") = conceptname(L("conmax"));
L("con") = makeconcept(L("order"),L("word"));
if (!spellword(L("word"))) {
addnumval(L("con"),"unknown",1);
}
addnumval(L("con"),"count",L("max"));
addnumval(L("conmax"),"checked",1);
}
# BUGFIX: the guard previously incremented an undeclared L("safety")
# while L("sanity") was initialized but never used.
if (L("sanity")++ > 300) {
L("done") = 1;
}
}
}
@@DECL
|
# Inside an _example group, collapse the literal phrase
# "father or mother" into a single _ignore node.
@PATH _ROOT _LINE _example
@RULES
_ignore <-
father ### (1)
or ### (2)
mother ### (3)
@@
|
@PATH _ROOT _LINE _example
# Mark the enclosing _LINE (path element 2) as female when a
# female-gendered word appears in the example.
@POST
X("female",2) = 1;
@RULES
_xNIL <-
_xWILD [one matches=(woman girl female mother sister aunt)] ### (1)
@@ |
@NODES _ROOT
# Placeholder pass: the _xNIL <- _xNIL rule matches nothing and
# performs no reduction; kept as a stub.
@RULES
_xNIL <-
_xNIL ### (1)
@@
@MULTI _section _sentence
# Reduce only the sentence-ending punctuation (element 2) to _endSent;
# the preceding token and trailing whitespace are left in place.
@POST
singler(2,2);
@@POST
@RULES
_endSent <-
_xWILD [one match=(_xALPHA _xNUM _patientID _xWHITE) except=(\n \r)] ### (1)
_xWILD [one trig match=(\. \?)] ### (2)
_xWILD [one match=(_xWHITE _xEND _xCTRL)] ### (3)
@@ |
@DECL
# Track one category occurrence. First sighting: create the concept
# under G("categories") with count 1. Repeat sighting: bump the count
# and log the duplicate to "dupes.txt". The category id is recorded in
# the "catid" string attribute if not already attached.
addCategory(L("category"),L("categoryid")) {
L("con") = findconcept(G("categories"),L("category"));
if (!L("con")) {
L("con") = makeconcept(G("categories"),L("category"));
addnumval(L("con"),"count",1);
} else {
L("count") = getnumval(findvals(L("con"), "count"));
"dupes.txt" << L("categoryid") << "-" << L("category") << " " << L("count") << "\n";
replaceval(L("con"),"count",L("count")+1);
}
if (!attrwithval(L("con"),"catid",L("categoryid"))) {
addstrval(L("con"),"catid",L("categoryid"));
}
}
@@DECL
@CODE
# Ensure the top-level "categories" concept exists and start each run
# with no stale children.
G("categories") = findconcept(findroot(),"categories");
if (!G("categories"))
G("categories") = makeconcept(findroot(),"categories");
rmchildren(G("categories"));
@@CODE
|
@PATH _ROOT _FOOTER _bibBlock _bibBody _bibItem
@POST
S("text") = N("$text",3);
S("type") = "text";
singler(3,3);
@RULES
_text <-
_xWILD [matches=(_section _figure _ref _cite)] ### (1)
_NEWLINE [s] ### (2)
_xWILD [s plus fails=(_text _abstract _ref _cite _BLANKLINE _stopper _HEADER _FOOTER)] ### (3)
_xWILD [match=(_text _abstract _ref _cite _BLANKLINE _stopper _HEADER _FOOTER) lookahead] ### (4)
@@
@POST
S("text") = N("$text",2);
S("type") = "text";
singler(2,2);
@RULES
_text <-
_xWILD [matches=(_section _figure _ref _cite)] ### (1)
_xWILD [s plus fails=(_text _abstract _ref _cite _BLANKLINE _stopper _HEADER _FOOTER)] ### (2)
_xWILD [match=(_text _abstract _ref _cite _BLANKLINE _stopper _HEADER _FOOTER) lookahead] ### (3)
@@
|
@CODE
# Open the state-dictionary output files. On the first file of a
# directory run (or a single-file run) the outputs are truncated ("w")
# and the "states" KB subtree is cleared; later files append ("app").
# NOTE(review): the extensions "dictt" and "kbbb" have doubled final
# letters -- confirm these are intentional (e.g. staging files).
G("dictfile") = G("$kbpath") + "states.dictt";
G("abbrevdict") = G("$kbpath") + "abbrev.dictt";
G("kbbfile") = G("$kbpath") + "states.kbbb";
G("citiesdict") = G("$kbpath") + "cities.dictt";
G("debugfile") = G("$kbpath") + "debug.txt";
G("debug") = openfile(G("debugfile"),"app");
# Input file head like "new-york" becomes the state name "new york".
G("state name") = strsubst(G("$inputhead"),"-"," ");
G("states") = getconcept(findroot(),"states");
if (G("$isfirstfile") || !G("$isdirrun")) {
rmchildren(G("states"));
L("type") = "w";
} else {
L("type") = "app";
}
G("state") = getconcept(G("states"),G("state name"));
G("dict") = openfile(G("dictfile"),L("type"));
G("kbb") = openfile(G("kbbfile"),L("type"));
G("abbrev") = openfile(G("abbrevdict"),L("type"));
G("cities") = openfile(G("citiesdict"),L("type"));
@@CODE |
# Find "per capita" and reduce to _adj, searching descendents
@RULES
_adj <- per [s] capita @@ |
@CODE
# No-op code region (placeholder so the pass has a @CODE section).
L("hello") = 0;
@@CODE
@NODES _TEXTZONE
# New tokenization handlers.
@CHECK
if (!N("dissolve"))
fail();
@POST
splice(1,1);
@RULES
_xNIL <-
_tok
@@
########## IDIOMS
@POST
S("sem detail") = "usa";
S("sem") = "country";
S("ne") = 1;
S("ne type") = "location";
S("ne type conf") = 95;
S("country") = S("stem") = "usa";
singler(2,9);
@RULES
_nounCountry [layer=_noun] <-
_xWILD [one fail=(_xALPHA \.)]
u
\. [opt]
_xWHITE [star]
s
\. [opt]
_xWHITE [star]
a [opt]
\. [opt]
@@
_nounCountry [layer=_noun] <-
_xSTART
u
\. [opt]
_xWHITE [star]
s
\. [opt]
_xWHITE [star]
a [opt]
\. [opt]
@@
# Chaff
@PRE
<1,1> length(1);
<4,4> length(1);
<7,7> length(1);
<8,8> length(1); # !
@POST
group(1,8,"_letabbr");
N("cap",1) = 1;
N("ne",1) = 1;
@RULES
_xNIL <-
_xCAP
\. [opt]
_xWHITE [star]
_xCAP
\. [opt]
_xWHITE [star]
_xCAP
_xWILD [star match=(_xWHITE _xCAP \. )]
@@
#################
# Grab some letter - period sequences.
@PRE
<2,2> length(1);
<5,5> length(1);
<8,8> length(1);
@POST
S("cap") = 1; # 04/21/07 AM.
singler(2,8);
@RULES
_letabbr <-
_xWILD [one fail=(_xALPHA \.)]
_xCAP
\.
_xWHITE [star]
_xCAP
\.
_xWHITE [star]
_xCAP
\. [lookahead]
_xWILD [one fail=(_xALPHA \.)]
@@
_letabbr <-
_xSTART
_xCAP
\.
_xWHITE [star]
_xCAP
\.
_xWHITE [star]
_xCAP
\. [lookahead]
_xWILD [one fail=(_xALPHA \.)]
@@
########## IDIOMS
# un
# u.n.
@POST
S("sem") = S("ne type") = "organization";
S("ne type conf") = 95;
S("stem") = "united nations";
S("ne") = 1;
single();
@RULES
_caps [layer=_noun] <-
_xWILD [one fail=(_xALPHA \.)]
u [s]
\. [s opt]
_xWHITE [star]
n [s]
\. [s opt]
@@
_caps [layer=_noun] <-
_xSTART
u [s]
\. [s opt]
_xWHITE [star]
n [s]
\. [s opt]
@@
# US States.
@POST
S("sem") = "us_state";
S("ne type") = "location";
S("ne type conf") = 85;
S("stem") = strtolower(phrasetext());
S("ne") = 1;
S("mypos") = "NP";
single();
@RULES
_usstate [layer=_noun] <-
_xWILD [one fail=(_xALPHA \.)]
n [s]
\. [s opt]
_xWHITE [star]
_xWILD [s one match=(d j m h c y)]
\. [s opt]
@@
_usstate [layer=_noun] <-
_xWILD [one fail=(_xALPHA \.)]
s [s]
\. [s opt]
_xWHITE [star]
_xWILD [s one match=(c d)]
\. [s opt]
@@
_usstate [layer=_noun] <-
_xWILD [one fail=(_xALPHA \.)]
w [s]
\. [s opt]
_xWHITE [star]
v [s]
\. [s opt]
@@
_usstate [layer=_noun] <-
_xWILD [one fail=(_xALPHA \.)]
r [s]
\. [s opt]
_xWHITE [star]
i [s]
\. [s opt]
@@
_usstate [layer=_noun] <-
_xWILD [one fail=(_xALPHA \.)]
d [s]
\. [s opt]
_xWHITE [star]
c [s]
\. [s opt]
@@
# US States.
@POST
pncopyvars(1);
S("sem") = "us_state";
S("ne type") = "location";
S("ne type conf") = 85;
S("stem") = strtolower(phrasetext());
S("ne") = 1;
S("mypos") = "NP";
single();
@RULES
_usstate [layer=_noun] <-
Conn
\. [s]
@@
@PRE
<2,2> length(1);
<5,5> length(1);
@POST
S("cap") = 1; # 04/21/07 AM.
singler(2,6);
@RULES
_letabbr <-
_xWILD [one fail=(_xALPHA \.)]
_xCAP
\.
_xWHITE [star]
_xCAP
\.
_xWILD [one fail=(_xALPHA \.)]
@@
_letabbr <-
_xSTART
_xCAP
\.
_xWHITE [star]
_xCAP
\.
_xWILD [one fail=(_xALPHA \.)]
@@
@POST
S("cap") = 1; # 04/21/07 AM.
singler(2,3);
@RULES
_letabbr <-
_xWILD [one fail=(_xALPHA \.)]
_xCAPLET
\.
_xWILD [one lookahead fail=(_xALPHA \.)]
@@
_letabbr <-
_xSTART
_xCAPLET
\.
_xWILD [one lookahead fail=(_xALPHA \.)]
@@
# ...
@POST
S("nopos") = 1;
single();
@RULES
_qEOS <-
_xWILD [min=3 match=(\. )]
@@
@POST
singler(1,1);
@RULES
_qEOS <-
\,
\"
@@
@RULES
_dbldash <- _xWHITE [plus] \- [plus] _xWHITE [plus] @@
@POST
group(2,3,"_dbldash");
N("nopos",2) = 1;
@RULES
_xNIL <-
_xALPHA
\-
\-
_xALPHA [lookahead]
@@
# alpha ' s
#@POST
# N("apos-s",1) = 1;
# excise(2,3);
#@RULES
#_xNIL <-
# _xALPHA
# \'
# s
# @@
@RULES
_aposS <-
\' [s]
s [s]
@@
_aposD <-
\' [s]
d [s]
@@
# aposLL
# Note: I'll ....
@POST
group(1,2,"_modal");
N("mypos",1) = "MD";
@RULES
_xNIL <-
\'
ll
@@
@POST
if (N(6))
N("quoted eos left",6) = 1;
L("txt") = N("$text",2);
if (L("txt") == "?")
S("sent end") = "interrogative";
else if (L("txt") == "!")
S("sent end") = "exclamatory";
singler(2,4);
@RULES
_qEOS <- # 05/27/07 AM.
_xWILD [one match=(_xALPHA _xNUM \] \) \> \% _noun)]
_xWILD [plus match=( \. \: \; \? \! )]
_xWHITE [star]
_dblquote
_xWHITE [star lookahead]
_xANY
@@
# NOTE: Trying to retain quotes within a sentence.
# alpha " alpha
@POST
N("dblquote rt",1) = 1;
N("dblquote lt",3) = 1;
noop();
@RULES
_xNIL <-
_xALPHA
_dblquote
_xALPHA [lookahead]
@@
# Zap double quotes for now...
@POST
if (N(1))
N("dblquote rt",1) = 1;
if (N(3))
N("dblquote lt",3) = 1;
excise(2,2);
@RULES
_xNIL <-
_xANY
_dblquote [trigger]
_xWILD [one lookahead match=(
_xEND)]
@@
_xNIL <-
_xSTART
_dblquote
_xANY [lookahead]
@@
# num %
@POST
chpos(N(2),"NN");
pncopyvars(1);
sclearpos(1,0);
single();
@RULES
_noun <-
_num
\%
@@
# HTML/XML crud.
@POST
group(1,3,"_dblquote");
@RULES
_xNIL <-
\&
_xWILD [one match=(quot)]
\;
@@
# Artifacts from tokenization.
# can't
# ca n't
@CHECK
if (N("tok",2) != "n't")
fail();
@POST
L("tmp1") = N(1);
group(1,1,"_modal");
pncopyvars(L("tmp1"),N(1));
chpos(N(1),"MD");
N("stem",1) = N("text",1) = "can";
@RULES
_xNIL <-
ca
_adv
@@
# won't
# wo n't
@CHECK
if (N("tok",2) != "n't")
fail();
@POST
L("tmp1") = N(1);
group(1,1,"_modal");
pncopyvars(L("tmp1"),N(1));
chpos(N(1),"MD");
N("stem",1) = N("text",1) = "will";
@RULES
_xNIL <-
wo
_adv
@@
# num num
@PRE
<2,2> var("fraction");
@POST
group(1,2,"_num");
N("number",1) = "plural";
@RULES
_xNIL <-
_xNUM
_num
@@
@POST
group(1,5,"_EQ");
group(1,1,"_adv");
chpos(N(1),"RB"); # 03/04/10 AM.
@RULES
_xNIL <- e \. _xWHITE [star] g \. @@
|
@CODE
prlit("output.txt","\n")
prlit("output.txt","EDUCATION:\n\n")
@@CODE
#@MULTI _educationZone #
@MULTI _educationInstance # 11/16/99 AM.
@POST
prlit("output.txt","School Name: ")
prtree("output.txt",1,"_CompleteSchoolName")
prlit("output.txt","\n")
@RULES
_xNIL <- _CompleteSchoolName @@
@POST
prlit("output.txt","School Location: ")
prtree("output.txt",1,"_SchoolLocation")
prlit("output.txt","\n")
@RULES
_xNIL <- _SchoolLocation @@
@POST
prlit("output.txt","Degree Major: ")
prtree("output.txt",1,"_degreeInMajor")
prlit("output.txt","\n")
@RULES
_xNIL <- _degreeInMajor @@
@POST
prlit("output.txt","Major: ")
prtree("output.txt",1,"_RealMajor")
prlit("output.txt","\n")
@RULES
_xNIL <- _RealMajor @@
@POST
prlit("output.txt","Minor: ")
prtree("output.txt",1,"_minor")
prlit("output.txt","\n")
@RULES
_xNIL <- _minor @@
@POST
prlit("output.txt","Year Graduated: ")
prtree("output.txt",1,"_SingleDate")
prlit("output.txt","\n")
@RULES
_xNIL <- _SingleDate @@
@POST
prlit("output.txt","Attended: ")
prtree("output.txt",1,"_DateRange")
prlit("output.txt","\n")
@RULES
_xNIL <- _DateRange @@
@POST
prlit("output.txt","GPA: ")
prtree("output.txt",1,"_gradeValue")
prlit("output.txt","\n")
prlit("output.txt","\n")
@RULES
_xNIL <- _Grade @@
|
@CODE
# Dump the G("headers") subtree to the per-pass display file in
# compact format (display type 0).
DisplayKB(G("headers"),0);
@@CODE |
# Remove value named str2 from attribute named str1 under concept con. Also removes the attribute.
rmattrval(L("con"), L("str1"), L("str2")); |
# NOTE: String and comment collection must be done together. #
# NOTE: Strings must be collected in a pass following collection of \". #
@POST
excise(1, 3)
@RULES
_xNIL <- \# _xWILD \n @@
@POST
excise(1, 2)
@RULES
_xNIL <- \# _xWILD _xEOF @@
@POST
rfastr(2)
single()
@RULES
_STR [base] <- \" _xWILD _xWILD [one match=( \" \n )] @@ # 07/12/06 AM.
# Moved x_commas here. (May want to get rid of this in the future.)
#@POST
# excise(1, 1)
#@RULES
#_xNIL <- \, [plus] @@
# EXPRESSION GRAMMAR STUFF. #
@POST
rfaop(1,2)
single()
@RULES
_opAND <- \& \& @@
_opOR <- \| \| @@
_opINC <- \+ \+ @@
_opDEC <- \- \- @@
_opEQ <- \= \= @@
_opNEQ <- \! \= @@
_opGE <- \> \= @@
_opLE <- \< \= @@
_opCONF <- \% \% @@ # 12/17/99 AM.
_opOUT <- \< \< @@ # 12/31/99 AM.
# MOVED COMPONENTS HERE.
@RULES
_ENDRULE [base] <- \@ \@ _xWHITE @@
#@POST
# noop()
#@RULES
#_xNIL <- _xWILD [min=1 max=1 fail=(\@)] @@
@RULES
_ENDRULE [base] <- \@ \@ _xEOF @@
_eoPOST [base layer=(_endMark)] <- \@ \@ POST [t] @@
_eoCHECK [base layer=(_endMark)] <- \@ \@ CHECK [t] @@
_eoPRE [base layer=(_endMark)] <- \@ \@ PRE [t] @@
_eoRULES [base layer=(_endMark)] <- \@ \@ RULES [t] @@
_eoRECURSE [base layer=(_endMark)] <- \@ \@ RECURSE [t] @@
_eoSELECT [base layer=(_endMark)] <- \@ \@ SELECT [t] @@
_eoNODES [base layer=(_endMark)] <- \@ \@ NODES [t] @@
_eoMULTI [base layer=(_endMark)] <- \@ \@ MULTI [t] @@
_eoPATH [base layer=(_endMark)] <- \@ \@ PATH [t] @@
_eoCODE [base layer=(_endMark)] <- \@ \@ CODE [t] @@
_eoDECL [base layer=(_endMark)] <- \@ \@ DECL [t] @@ # 12/19/01 AM.
# _soRULES [base layer=(_startMark)] <- \@ RULES [t] @@ #
_soPOST [base layer=(_startMark)] <- \@ POST [t] @@
_soCHECK [base layer=(_startMark)] <- \@ CHECK [t] @@
_soPRE [base layer=(_startMark)] <- \@ PRE [t] @@
_soNODES [base layer=(_startMark)] <- \@ NODES [t] @@
_soMULTI [base layer=(_startMark)] <- \@ MULTI [t] @@
_soPATH [base layer=(_startMark)] <- \@ PATH [t] @@
_soCODE [base layer=(_startMark)] <- \@ CODE [t] @@
_soDECL [base layer=(_startMark)] <- \@ DECL [t] @@
_soSELECT [base layer=(_startMark)] <- \@ SELECT [t] @@
_soRECURSE [base layer=(_startMark)] <- \@ RECURSE [t] @@
# Separating out rule mark so it can be counted.
# If there are none, then don't need to warn about no rules in pass. #
@POST
rfarulemark()
single()
@RULES
_soRULES [base layer=(_startMark)] <- \@ RULES [t] @@
@POST
rfanonlit(2)
single()
@RULES
_NONLIT [base] <- \_ _xWILD [s one match=(_xALPHA _xEMOJI)] @@
@RULES
_ARROW [base] <- \< \- @@
@POST
rfaname(1)
single()
@RULES
# Not setting base for these potential keywords. #
_LIT <- _xWILD [s one match=(
N X G P s
L # LOCAL VARS FOR USER-DEFINED FNS. # 03/09/02 AM.
if else while return
)] @@ # 11/06/99 AM.
_LIT [base] <- _xALPHA @@
_LIT [base] <- _xEMOJI @@
_LIT [base] <- _xCTRL @@ # 10/14/13 AM.
# ADDING FLOAT TO GRAMMAR. #
@POST
rfafloat(1,3)
single()
@RULES
_FLOAT <- _xNUM \. _xNUM @@
@POST
rfanum(1)
single()
@RULES
_NUM [base] <- _xNUM @@
|
@PATH _ROOT _LINE _conjugation
# Delete leading whitespace inside conjugation lines and flag the
# enclosing _conjugation node (path element 3) with "up".
@POST
X("up",3) = 1;
excise(1,1);
noop();
@RULES
_xNIL <-
_xWHITE [s] ### (1)
@@
|
@PATH _ROOT _contactZone _LINE
# If human names, track highest confidence.
@CHECK
if (!X("name found",2) && !N("name found") && G("humannames") > 1)
succeed();
fail();
@POST
if (!N("humanname conf"))
{
# Must have come from Gram hierarchy or other strong source.
# Assign an initial confidence.
N("humanname conf") = 90;
}
# Now some computations in the contact zone.
N("ctcname conf") = N("humanname conf");
# Proximity to first address line.
if (X("lineno") < X("first addressline",2))
{
N("diff") = X("first addressline",2) - X("lineno");
N("ctcname conf") = N("ctcname conf") %% -(N("diff") * 15);
}
# Formatting above and below line.
if (X("format above"))
N("ctcname conf") = N("ctcname conf") %% 80;
if (X("format below"))
N("ctcname conf") = N("ctcname conf") %% 80;
# Now, comparison to others.
if (N("ctcname conf") > X("hi ctcname conf",2))
X("hi ctcname conf",2) = N("ctcname conf"); # High so far.
@RULES
_xNIL <- _humanName @@
|
@NODES _ROOT
@POST
# Capture the title text, drop the title tokens and the colon, and
# reduce the remaining body to a _section node.
S("section_title") = N("$text", 1);
# excise(4,4);
excise(1,2);
single();
@RULES
_section <-
_xWILD [min=1 max=10 fails=(\: \n \r _break)] ### (1)
\: [trig] ### (2)
_xWILD [fails=(_break _xEND)] ### (3)
# _xWILD [one matches=(_break _xEND)] ### (4)
@@
# @POST
# excise(1,1);
# @RULES
# _xNIL <-
# _break
# @@
|
@PATH _ROOT _posZone _defZone _definition _headerZone _LINE
# For each _item, create a numbered "variation" child under the concept
# stored on path element 3 (presumably the _defZone's "con" -- confirm)
# and record the item's text on it.
@POST
L("con") = MakeCountCon(X("con",3),"variation");
addstrval(L("con"),"text",N("$text",2));
singler(2,2);
@RULES
_xNIL <-
_item ### (1)
@@ |
@PATH _ROOT _DECL _NLPPP
@POST
rfbdecls(1)
single()
@RULES
_DECL [base] <- _FUNCDEF [plus] @@
# Ignore ok things within a mangled function body.
@RULES
_ERROR <-
_FNCALL
\{
_STMTS [star]
@@
_ERROR <- _STMTS [plus] @@
# Will find only first error in a function.
# Better than nothing.
@POST
rfberror(1)
single()
@RULES
_ERROR <- _xANY [plus] @@
|
@CODE
# If not outputting tags, exit.
if (!G("verbose")) # Todo: another flag here.
exitpass();
if (!G("pretagged"))
exitpass();
# Initialize traversal data.
G("parseRoot") = pnroot();
G("node") = G("parseRoot");
G("depth") = 0;
G("path") = 0; # Store the current path in the parse tree.
while (G("node")) # Traverse the parse tree.
{
G("path")[G("depth")] = G("node"); # Update path in the parse tree.
G("childs") = pndown(G("node"));
G("next") = pnnext(G("node"));
# Stop at a node with a part-of-speech array.
L("posarr len") = pnvar(G("node"),"posarr len");
L("nopos") = pnvar(G("node"),"nopos");
L("nm") = pnname(G("node"));
L("ln") = strlength(L("nm")) - 1;
if (strpiece(L("nm"),0,0) != "_")
{
if (strisdigit(L("nm")))
pnreplaceval(G("node"),"mypos","CD");
L("nm") = "NIL";
}
else
{
if (L("nm") == "_sent")
"tags.txt" << "\n";
if (L("ln") >= 1)
L("nm") = strpiece(L("nm"),1,L("ln"));
}
if (L("posarr len") > 0)
{
G("gochilds") = 0; # Don't traverse children.
L("npos") = nodepos(G("node"),L("nm"));
L("txt") = prosify(G("node"),"text"); # 12/15/20 AM.
"tags.txt" << L("txt")
<< " / ("
<< pnvar(G("node"),"posarr")
<< ") "
<< L("npos");
;
if (!scorenodepos(G("node"),L("npos"))) # mismatch
{
pnrename(G("node"),"_poserr");
"tags.txt" << " *********************"; # 01/08/05 AM.
if (L("npos")) # 06/14/06 AM.
{
G("mismatch out") << L("txt") # 06/14/06 AM.
<< " / ("
<< pnvar(G("node"),"posarr")
<< ") "
<< L("npos")
;
if (G("mismatch verbose"))
{
G("mismatch out") << " (" << G("$inputhead") << ")";
}
G("mismatch out") << "\n";
}
else
{
G("zero out") << L("txt")
<< " / ("
<< pnvar(G("node"),"posarr")
<< ") "
<< L("npos")
;
if (G("mismatch verbose"))
{
G("zero out") << " (" << G("$inputhead") << ")";
}
G("zero out") << "\n";
}
}
"tags.txt" << "\n";
}
else if (L("nopos")) # Things like punctuation...
{
G("gochilds") = 0; # Don't traverse children.
L("txt") = pnvar(G("node"),"$text");
"tags.txt" << L("txt")
<< " / ()"
<< "\n";
}
else
G("gochilds") = 1; # Traverse children.
if (G("childs") # If node has children, traverse them.
&& G("gochilds"))
{
G("node") = G("childs");
++G("depth");
}
else if (G("next")) # If node has right sibling, get it.
G("node") = G("next");
else # Try continuing from a parent.
{
G("done") = 0;
G("node") = 0;
while (!G("done")) # Find a continuation or quit.
{
if (--G("depth") > 0)
{
G("node") = G("path")[G("depth")]; # Move up to parent.
if (G("node") = pnnext(G("node"))) # Try to move right.
G("done") = 1;
}
else
G("done") = 1;
}
}
}
@@CODE
|
@PATH _ROOT _nepali _headerZone
# For each pair of _LINE nodes, create a word concept from the second
# line's text under G("words") (assumed initialized elsewhere -- TODO
# confirm). G("word") holds only the most recent concept.
@POST
G("word") = makeconcept(G("words"),N("$text",2));
@RULES
_xNIL <-
_LINE ### (1)
_LINE ### (2)
@@
|
@CODE
L("hello") = 0;
@@CODE
@NODES _sent
@CHECK
if (!N("needs-np",1))
fail();
@POST
L("tmp") = N(2);
group(2,2,"_noun");
pncopyvars(L("tmp"),N(2));
@RULES
_xNIL <-
_det
_xALPHA
_prep [lookahead]
@@
# vg det alpha alpha .
@CHECK
if (!N("noun",4) && !N("adv",4))
fail();
@POST
L("tmp4") = N(4);
L("tmp3") = N(3);
if (N("noun",4))
{
group(4,4,"_noun");
pncopyvars(L("tmp4"),N(4));
if (N("adj",3))
group(3,3,"_adj");
else if (N("noun",3))
group(3,3,"_noun");
else
group(3,3,"_adj");
pncopyvars(L("tmp3"),N(3));
if (pnname(N(3)) == "_adj")
fixadj(N(3));
group(2,4,"_np");
pncopyvars(L("tmp4"),N(2));
clearpos(N(2),1,1); # Zero out token info.
}
else # 4 = adv
{
group(4,4,"_adv");
pncopyvars(L("tmp4"),N(4));
group(3,3,"_noun");
pncopyvars(L("tmp3"),N(3));
group(2,3,"_np");
pncopyvars(L("tmp3"),N(2));
clearpos(N(2),1,1); # Zero out token info.
}
if (!N("voice",1))
N("voice",1) = "active";
@RULES
_xNIL <-
_vg
_det
_xALPHA
_xALPHA
_xWILD [one lookahead match=(_qEOS _xEND)]
@@
# vg alpha prep
@CHECK
if (!N("noun",2))
fail();
if (N("adv",2))
fail();
@POST
L("tmp2") = N(2);
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
nountonp(2,1);
if (pnname(N(1)) == "_vg")
if (!N("voice",1))
N("voice",1) = "active";
@RULES
_xNIL <-
_xWILD [one match=(_vg _verb)]
_xALPHA
_xWILD [one lookahead match=(_prep)]
@@
# to vg conj alpha
# to verb conj alpha
@CHECK
if (!N("verb",4))
fail();
if (!vconjq(N(4),"inf"))
fail();
@POST
L("tmp4") = N(4);
group(4,4,"_verb");
L("v") = N(4);
pncopyvars(L("tmp4"),N(4));
group(4,4,"_vg");
mhbv(N(4),L("neg"),0,0,0,0,L("v"));
N("voice",4) = "active";
pncopyvars(L("tmp4"),N(4));
clearpos(N(4),1,0); # Zero out token info.
if (pnname(N(2)) == "_vg")
if (!N("voice",2))
N("voice",2) = "active";
@RULES
_xNIL <-
to [s]
_xWILD [one match=(_verb _vg)]
_conj
_xALPHA
@@
# dqa alpha alpha prep
@CHECK
if (N("noun",3))
fail();
@POST
L("tmp3") = N(3);
if (N("verb",3))
{
group(3,3,"_verb");
pncopyvars(L("tmp3"),N(3));
L("v") = N(3);
group(3,3,"_vg");
mhbv(N(3),L("neg"),0,0,0,0,L("v"));
# N("voice",3) = 0;
pncopyvars(L("tmp3"),N(3));
N("verb node",3) = L("v");
clearpos(N(3),1,0); # Zero out token info.
}
else if (N("adv",3))
{
group(3,3,"_adv");
pncopyvars(L("tmp3"),N(3));
}
else if (N("adj",3))
{
group(3,3,"_adj");
pncopyvars(L("tmp3"),N(3));
fixadj(N(3));
}
@RULES
_xNIL <-
_xWILD [plus match=(_det _quan _num _xNUM _adj)]
_xALPHA
_xALPHA
_xWILD [one lookahead match=(_prep)]
@@
# det quan adj alpha prep
# dqan
@CHECK
if (!N("noun",2))
fail();
# Todo: agreement.
@POST
L("tmp2") = N(2);
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
@RULES
_xNIL <-
_xWILD [plus match=(_det _quan _num _xNUM _adj)]
_xALPHA
_xWILD [one lookahead match=(_prep _qEOS _xEND)]
@@
# vg out adj
@CHECK
if (N("mypos",2) && N("mypos",2) != "RP")
fail();
@POST
L("tmp2") = N(2);
if (!N("mypos",2)) # Could already be assigned.
{
group(2,2,"_particle");
pncopyvars(L("tmp2"),N(2));
# See if kb has something.
if (L("num") = phrprepverbq(N(1),N(2)))
{
if (L("num") == 2) # prepositional # FIX. 06/18/06 AM.
chpos(N(2),"IN");
else # 1 or 3.
chpos(N(2),"RP"); # Default.
}
# else # Default.
# chpos(N(2),"RP"); #
}
if (pnname(N(1)) == "_vg")
{
if (!N("voice",1))
N("voice",1) = "active";
listadd(1,2,"false");
}
else
{
L("v") = N(1);
group(1,1,"_vg");
mhbv(N(1),L("neg"),0,0,0,0,L("v"));
pncopyvars(L("v"),N(1));
clearpos(N(1),1,0);
N("pos100 v-particle",1) = 1;
listadd(1,2,"false");
}
@RULES
_xNIL <-
_xWILD [one match=(_verb _vg)]
_xWILD [s one match=(out up down off)]
_xWILD [one lookahead match=(_det _quan
_num _xNUM _adj _noun _np _adv)]
@@
# noun alpha alpha np
# Of two ambiguous alphas between a _noun and an _np, retype the
# second as an active present-tense verb group if it has verb potential.
@CHECK
if (!N("verb",3))
fail();
@POST
L("tmp3") = N(3);
group(3,3,"_verb");
L("v") = N(3);
pncopyvars(L("tmp3"),N(3));
fixverb(N(3),"active","VBP"); # Force active voice, VBP pos tag.
group(3,3,"_vg");
# NOTE(review): L("neg") is never assigned in this @POST, so the
# negation slot passed to mhbv() is empty -- confirm intended.
mhbv(N(3),L("neg"),0,0,0,0,L("v"));
pncopyvars(L("tmp3"),N(3));
N("voice",3) = "active";
clearpos(N(3),1,0); # Zero out token info.
@RULES
_xNIL <-
_noun
_xALPHA
_xALPHA
_np [lookahead]
@@
# prep alpha prep
@CHECK
if (!N("verb",3))
fail();
if (!vconjq(N(3),"-ing"))
fail();
@POST
L("tmp3") = N(3);
group(3,3,"_verb");
pncopyvars(L("tmp3"),N(3));
L("v") = N(3);
L("neg") = mhbvadv(2,0,0,0);
group(3,3,"_vg");
mhbv(N(3),L("neg"),0,0,0,0,L("v"));
pncopyvars(L("tmp3"),N(3));
N("first verb",3) = N("verb node",3) = L("v");
clearpos(N(3),1,0); # Zero out token info.
fixvg(N(3),"active","VBG");
@RULES
_xNIL <-
_xWILD [s one match=(_prep) except=(to)]
_xWILD [star match=(_adv _advl)]
_xALPHA
_xWILD [one lookahead match=(_prep _fnword)]
@@
# np conj np
# Conjoin two _np's at end of sentence into a single _np.
# Propagate "date" semantics if either conjunct is a date.
@POST
if (N("sem",1) == "date" || N("sem",3) == "date")
S("sem") = "date";
singler(1,3); # Reduce only elts 1-3; the lookahead (elt 4) stays.
@RULES
_np <-
_np
_conj
_np
_xWILD [one lookahead match=(_qEOS _xEND)]
@@
@POST
L("tmp7") = N(7);
group(2,7,"_np");
pncopyvars(L("tmp7"),N(2)); # Todo: compose nps.
N("list",2) = N("compound-np",2) = 1;
N("ne",2) = 0;
if (pnname(N(1)) == "_vg")
if (!N("voice",1))
N("voice",1) = "active";
@RULES
_xNIL <-
_xWILD [one match=(_prep _fnword _verb _vg)]
_np
\,
_np
\, [opt]
_conj
_np
@@
# noun conj np
# At sentence start, promote a bare _noun to _np and conjoin:
# noun conj np => np.
@POST
nountonp(2,1); # Wrap the _noun (elt 2) as an _np first.
group(2,4,"_np");
@RULES
_xNIL <-
_xSTART
_noun
_conj
_np
@@
# at the same time ,
@CHECK
if (!N("noun",4))
fail();
@POST
L("tmp4") = N(4);
group(4,4,"_noun");
pncopyvars(L("tmp4"),N(4));
@RULES
_xNIL <-
_xWILD [s one match=(at)]
_det [opt]
_adj
_xALPHA
_xWILD [lookahead one match=(\, )]
@@
# same alpha as
@CHECK
if (!N("noun",2))
fail();
@POST
L("tmp2") = N(2);
group(2,2,"_noun");
pncopyvars(L("tmp2"),N(2));
@RULES
_xNIL <-
same [s]
_xALPHA
@@
# alpha up
@CHECK
if (!N("verb",1))
fail();
L("v") = vconj(N(1));
if (L("v") != "-edn"
&& L("v") != "-en"
&& L("v") != "-ing")
fail();
@POST
L("tmp1") = N(1);
group(1,1,"_verb");
pncopyvars(L("tmp1"),N(1));
@RULES
_xNIL <-
_xALPHA
_xWILD [s one match=(up out)]
@@
# vg adj alpha
@CHECK
if (!N("noun",3) && !N("adj",3))
fail();
@POST
L("tmp3") = N(3);
if (N("adj",3))
group(3,3,"_adj");
else if (N("noun",3))
group(3,3,"_noun");
pncopyvars(L("tmp3"),N(3));
if (pnname(N(3)) == "_adj")
fixadj(N(3));
@RULES
_xNIL <-
_xWILD [one match=(_vg _verb)]
_adj
_xALPHA
@@
# vg to vg
# used to make
#@POST
# L("tmp1") = N(1);
# L("tmp4") = N(4);
# L("v") = N("verb node",4);
# if (L("v"))
# {
# chpos(L("v"),"VB"); # infinitive.
# pnreplaceval(L("v"),"inf",1);
# }
#
# # If ambiguous...
# fixvg(N(1),"active","VBD");
#
# group(1,4,"_vg");
# pncopyvars(L("tmp4"),N(1));
# N("voice",1) = 0; # Todo: Compose voice here...
# N("first vg",1) = L("tmp1");
# N("last vg",1) = L("tmp4");
# N("pattern",1) = "vg-to-vg";
#@RULES
#_xNIL <-
# _vg
# _adv [star]
# to [s]
# _vg
# @@
# vg as alpha
@CHECK
if (!N("noun",3) && !N("unknown",3))
fail();
@POST
pnrename(N(2),"_prep");
chpos(N(2),"IN");
@RULES
_xNIL <-
_vg
as [s]
_xALPHA
@@
# np , np
# person , age
# apposition.
@CHECK
if (N("sem",3) != "specific_age")
fail();
@POST
group(1,4,"_np");
N("sem",1) = "person";
@RULES
_xNIL <-
_np
\,
_np
\,
@@
# Looks too old. #
# happy to oblige
#@POST
# if (N("voice",3) == "active")
# {
# # Fix to infinitive.
# L("v") = N("verb node",3);
# chpos(L("v"),"VB");
# }
# L("tmp3") = N(3);
# group(1,1,"_adjc");
# group(1,3,"_vg");
# pncopyvars(L("tmp3"),N(1));
# N("pattern",1) = "adj-to-v";
#@RULES
#_xNIL <-
# _adj
# to [s]
# _vg
# @@
|
@NODES _ROOT _IGNORE
# Bracket everything between figure delimiters into a _figure node.
@POST
S("type") = "figure";
single();
@RULES
_figure <-
_beginFigure ### (1)
_xWILD ### (2)
_endFigure ### (3)
@@
|
@PATH _ROOT _contactZone _LINE
# If name has been selected, use it. Else...
# If a single high confidence name was found in contact zone(s)
# of the resume, use it without further reasoning.
@CHECK
if (X("name found",2)) fail();
if (G("humannames") == 1 || N("name found")
|| N("ctcname conf") == X("hi ctcname conf",2))
succeed();
fail();
@POST
# Need a way to get all the pieces of a name.
# Probably should have been put into semantics already.
# For now, designate this name as the chosen one,
# and get it in a subsequent round.
N("name found") = 1;
X("name found") = 1; # Track the line that has the name.
X("name found",2) = 1; # Name for contact zone found.
X("contactName",2) = N("$text"); # At least get total name here.
@RULES
_xNIL <- _humanName @@
|
@CODE
# Pass init: seed a KB traversal queue with the root concept and
# zero two debug counters (logged to test.txt below).
G("nodesToTraverse") = G("root");
G("count") = 0;
G("count2") = 0;
@@CODE
@NODES _ROOT
# Tab-separated line: "<child>\t<parent>\n".
# If the parent field (elt 3) names the current front-of-queue
# concept, create the child field (elt 1) as a new concept under it
# and append it to the traversal queue.
@POST
"test.txt" << "count: " << G("count") << " passnum: " << G("$passnum") << " rulenum: " << G("$rulenum") << "\n";
G("count") = G("count") + 1;
L("parentNode") = G("nodesToTraverse")[0];
if (N("$text", 3) == conceptname(L("parentNode"))) {
G("nodesToTraverse")[arraylength( G("nodesToTraverse") )] = makeconcept(L("parentNode"), N("$text", 1));
}
noop(); # No reduction; this pass only builds KB concepts.
@RULES
_xNIL <-
_xWILD [plus fails=(\t)] ### (1)
\t ### (2)
_xWILD [plus fails=(\n \r)] ### (3)
_xWILD [one matches=(\n \r)] ### (4)
@@
# Debug rule: counts firings to test.txt.
# NOTE(review): matching on a _xNIL rule element is unusual --
# confirm this rule ever fires in practice.
@POST
"test.txt" << "count2: " << G("count2") << " passnum: " << G("$passnum") << " rulenum: " << G("$rulenum") << "\n";
G("count2") = G("count2") + 1;
noop();
@RULES
_xNIL <-
_xNIL ### (1)
@@
|
@NODES _ROOT
# Strip "RID <number>" record-ID artifact lines from the text.
@POST
excise(1,3);
noop();
@RULES
_xNIL <-
RID ### (1)
_xNUM ### (2)
\n [opt] ### (3)
@@
|
@PATH _ROOT _story
### Including generic business words in the "stop list". #
### And other general words.
### In this way, they don't count against the current industry.
@POST
# Count the token as a generic alpha; being in this "stop list"
# keeps it from counting toward any specific industry.
++G("total alphas");
@RULES
_xNIL <- _xWILD [s one match=(
finger fingers
order orders ordering ordered
wait waits waiting waited
complain complains complaining complained
pay pays paying paid
amount amounts
check checks checking checked
customer customers
client clients clientele
staff
clerk clerks
employee employees employe employes
checker checkers
manager managers
cashier cashiers
service services
place places placing placed
mess messes
sign signs
behavior behaviors
problem problems hassle hassles
quality qualities
woman women girl girls gal gals
man men boy boys guys
person persons teenager teenagers
minute minutes
hour hours
clue clues
look looks
rude ruder rudest
slow slower slowest
wrong wronger wrongest
right righter rightest correct perfect
nasty nastier nastiest
mean meaner meanest
hard harder hardest
ill iller illest
sick sicker sickest
dirty dirtier dirtiest
filthy filthier filthiest
sticky stickier stickiest
messy messier messiest
poor poorer poorest
bad worse worst
lousy lousier lousiest
yucky yuckier yuckiest
ucky uckier uckiest
blechy blechier blechiest
blecchy blecchier blecchiest
cruddy cruddier cruddiest
crappy crappier crappiest
small smaller smallest
tiny tinier tiniest
teeny teenier teeniest
teensy teensier teensiest
clueless
incompetent
braindead
allergic sensitive averse intolerant
I we me us he she they him her them you
somehow only just
extremely
forever
remotely
really
very
ever
so
such
too
even
thoroughly
totally
completely
fully
absolutely
likewise similarly
always
sometimes
still
not
never
rarely
)] @@
@POST
# While we're here, COUNT THE ALPHABETICS IN THE TEXT. #
++G("total alphas");
@RULES
_xNIL <- _xWILD [s one matches=(
a
about
above
achieve
achieved
achieves
achieving
across
add
added
adding
adds
after
again
against
al
alike
all
almost
alone
along
alongside
already
also
although
always
am
amid
amidst
among
amongst
an
and
another
any
anyhow
anyway
anyways
apart
apiece
aplenty
apparently
apropos
are
aren
around
as
at
atop
attain
attained
attaining
attains
averse
avoid
awake
aware
away
b
back
bad
badly
barely
barring
base
based
bases
basing
basically
basis
be
because
became
become
becomes
becoming
been
before
began
begin
beginning
begins
behind
being
below
beneath
beside
besides
best
better
between
beyond
big
billion
both
bound
break
briefly
bring
but
by
c
call
can
cause
certain
certainly
chance
chances
change
changed
changes
changing
choose
clear
close
come
compare
compel
complain
complete
completely
concerning
conceivably
concentrate
concerning
consider
considerable
considering
continue
continued
continues
continuing
convince
cordially
could
create
created
creates
creating
criteria
criterion
cut
d
dare
date
day
decide
decidedly
decrease
decreased
decreases
decreasing
deeply
defer
define
definite
definitely
degree
delay
demand
demonstrate
deny
describe
despite
determine
did
direct
directly
disable
discover
do
does
doing
done
double
doubly
doubt
doubtless
down
dozen
due
during
e
each
earlier
earliest
early
easily
easy
eight
eighteen
either
eleven
else
elsewhere
empty
enable
end
enough
entire
entirely
equally
essence
essential
essentially
et
etc
even
evening
evenly
ever
every
everyone
everything
everywhere
exact
exactly
except
excepting
exist
existed
existing
exists
extreme
extremely
f
fast
faster
fastest
few
fewer
fewest
fifteen
find
finding
finds
first
five
follow
following
follows
for
forth
forward
found
four
fourteen
fourth
from
full
g
gave
get
gets
getting
give
go
goes
going
granted
grew
grow
growing
grows
h
had
hard
hardly
has
have
having
height
heights
her
here
hers
high
highly
him
his
how
i
if
important
importantly
in
include
included
includes
including
inclusive
increase
increased
increases
increasing
inside
into
is
it
its
j
just
k
knew
know
knowing
known
knows
l
large
largely
last
late
lately
later
latest
least
left
less
lesser
like
likewise
ll
look
looked
looking
looks
low
lowly
m
many
may
mid
might
milestone
milestones
mine
minus
more
most
much
my
n
near
need
needed
needing
needs
new
nine
nineteen
notwithstanding
now
number
o
of
off
old
on
one
onto
open
opposite
or
other
our
out
outside
outwith
over
p
past
pending
per
plus
put
q
r
re
remain
remained
remaining
remains
right
round
s
same
second
serving
set
seven
seventeen
shall
should
significant
similar
similarly
since
six
sixteen
size
sized
small
so
soon
start
strength
strengths
strong
stronger
strongest
strongly
such
sure
t
take
ten
than
that
the
their
theirs
them
then
there
these
they
third
thirteen
this
three
through
throughout
till
to
today
toward
towards
twelve
two
u
under
underneath
unlike
until
up
upon
us
use
v
via
w
want
wanted
wanting
wants
was
way
weak
weaker
weakest
weakly
week
weekly
well
went
were
what
when
where
while
who
why
will
with
within
without
wonder
wondered
wondering
wonders
would
x
y
year
years
you
your
z
face
facilitate
fail
faint
fairly
fall
fancy
far
farther
farthest
father
favor
favour
feed
feel
fever
few
fewer
fewest
fight
fill
finally
find
fine
finish
firm
first
foremost
fish
flatly
flee
fling
flounder
flu
fly
focus
following
foot
for
start
end
all
that
certain
example
instance
thing
one
sure
forbid
force
forget
forgive
formally
former
formula
formulae
forum
freeze
from
see
full
fully
fun
fundamentally
funds
further
furthest
galore
generally
get
give
place
given
go
good
goods
graciously
granted
great
greatly
greetings
grow
guarantee
guess
half
hand
happen
hardly
hate
have
got
he
head
hear
help
hence
her
here
hero
herself
hide
him
himself
his
hit
hold
holidays
home
honestly
how
however
humbly
hundred
hundredweight
hurt
ideally
identical
if
only
ignore
ill
imagine
immediately
imply
fact
actual
addition
brief
case
front
order
other
words
particular
reality
short
spite
inasmuch
incidentally
incite
include
included
including
inclusion
indeed
indefinite
indicate
induce
inside
insist
insofar
instead
intend
into
introduce
invite
it
its
itself
just
keep
kind
kindly
know
last
least
lastly
lately
later
latter
lead
leaf
lean
leap
learn
least
leave
lend
less
lest
let
alone
lie
light
like
listen
literally
little
live
long
look
upon
looks
lose
sight
track
louse
love
lovely
main
make
allowance
room
man
manage
manners
many
may
might
me
mean
means
medium
meet
mention
mere
merely
midnight
million
mind
mine
mini
minus
minutes
miss
mistake
more
morning
most
mother
mouse
mouth
move
mow
much
multi
must
my
myself
name
namely
natural
naturally
near
nearby
nearly
necessarily
need
needn
needs
neglect
neither
neo
never
next
nice
night
no
doubt
longer
matter
more
sooner
nominally
non
none
noon
nor
not
unnaturally
yet
notice
notify
notwithstanding
now
nowadays
nucleus
observe
oblige
obviously
odds
course
off
offer
officially
old
omit
on
condition
contrary
onto
top
once
one
only
onset
open
opposite
optional
or
so
order
other
otherwise
ought
oughtn
our
ourselves
out
outright
outside
outskirts
outwardly
outwith
over
overlook
owe
owing
pains
part
particular
particulars
partly
pass
passive
past
path
patois
pay
attention
pending
people
pending
per
perfect
perfectly
perhaps
period
permit
personal
personally
persuade
phenomenon
phenomena
place
plain
plan
plane
plead
please
plus
point
position
positive
possibly
postpone
practice
practise
precise
precisely
prefer
premises
prepare
presently
press
presuming
pretend
prevent
principal
principle
private
pro
process
proclaim
promise
propose
protect
prove
provide
provided
providing
pseudo
pull
punish
pure
purely
simply
put
end
stop
quit
quite
rarely
rather
than
read
readily
real
realistically
really
recently
recognize
recollect
recommend
refer
refuse
regard
regards
regret
regularly
rejoice
relative
relieve
rely
remain
remains
remark
remember
remind
rend
render
replace
report
reportedly
repute
reputedly
request
require
research
resent
reserve
resist
resolve
resort
respective
respectively
rest
result
riches
rid
ride
right
rightly
rigid
ring
rise
fall
rising
risk
rob
round
rule
run
with
same
save
saw
say
scarcely
school
sea
see
seeing
seek
seem
seldom
self
selfsame
sell
send
separate
separation
seriously
serve
set
several
sew
shake
shall
should
she
sheaf
shear
shed
sheer
shine
shoot
short
should
show
shrink
shun
shut
sick
signal
similar
similarly
simple
simply
since
sink
sit
slay
sleep
slide
sling
slink
slit
smell
smite
so
sole
solo
some
somehow
something
sooner
sort
sound
sow
space
spare
speak
specific
specify
speed
spell
spend
spill
spin
spirits
spit
split
spoil
spread
spring
stadium
stairs
stand
stare
start
state
steal
stem
stick
still
stimulus
sting
stink
stone
stop
stratum
stress
strew
strict
strictly
speaking
stride
strike
string
strive
strong
structure
structural
style
stylus
subject
such
summer
sunrise
sunset
super
superficial
superficially
supper
supply
suppose
supposedly
supposing
sure
surely
surround
surroundings
suspect
swear
sweat
sweep
swell
swim
swing
tag
take
talk
tantamount
taste
teach
tear
tell
tempo
tempt
tend
tense
test
than
thank
thanks
that
the
their
them
themselves
then
theoretically
there
these
they
thief
thine
think
this
thorough
thoroughly
those
thou
though
thousand
threaten
thrive
through
throughout
throw
thrust
thus
thy
thyself
till
time
to
together
ton
tone
too
total
totally
toward
towards
town
train
tread
treat
true
truly
truth
truthfully
try
turn
twice
twilight
ultimatum
ultra
uncle
under
underneath
understand
undertake
undoubtedly
uniue
unit
unless
unlike
until
unwell
up
upon
upper
urge
us
use
used
utter
utterly
very
via
vice
virtuoso
wait
wake
walk
want
warn
watch
we
weak
wear
wed
weep
well
were
wet
what
whatever
when
whenever
where
whereas
whereby
whereupon
wherever
whether
which
whichever
while
whilst
)] @@
@POST
# Frequency count for every remaining alphabetic token (stop words
# were consumed by the preceding list rules).
# Add word to dict, if absent. Get its word-concept.
G("word concept") = addword(N("$text"));
# Get the counter from the word concept.
# (If absent, automatically starts at zero.)
G("frequency") = numval(G("word concept"), "frequency");
# Increment counter and store back in word-concept.
replaceval(G("word concept"), "frequency", ++G("frequency"));
# While we're here, COUNT THE ALPHABETICS IN THE TEXT. #
++G("total alphas");
++G("nonstop alphas"); # 10/08/00 AM.
# noop(); # By default, no reduce occurs if there's code in @POST.
@RULES
_xNIL <-
_xALPHA [s] ### 1
@@
|
@PATH _ROOT _headerZone _liGroup
# Wrap an _iGroup between <i> open/close tags into an _hiGroup node.
@POST
"hiGroup.txt" << N("$text",2) << "\n"; # Debug: log the group text.
single();
@RULES
_hiGroup <-
_iOpen ### (1)
_xWILD [match=(_iGroup)] ### (2)
_iClose ### (3)
@@
|
# Remove concept con from Knowledge Base. Removes entire subhierarchy.
# NOTE(review): assumes L("con") is bound by surrounding context.
rmconcept(L("con"));
@NODES _LINE
@PRE
<3,3> cap();
<5,5> length(1);
<7,7> cap();
<9,9> cap();
@POST
group(3, 7, "_streetName");
single();
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _xALPHA [s] \' [s] s [trig s] _xWHITE [star s] _xALPHA [s] _xWHITE [star s] _PostalRoad [s layer=("_road")] @@
@PRE
<3,3> cap();
<5,5> cap();
<7,7> cap();
<10,10> cap();
<10,10> length(5);
@POST
group(3, 5, "_streetName");
single();
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _xALPHA [s] _xWHITE [star s] _xALPHA [s] _xWHITE [star s] _PostalRoad [trig s layer=("_road")] \. [s] _xWHITE [star s] _xWILD [min=1 max=1 s layer=("_postdirection") match=("_cityMod" "_Direction")] @@
@PRE
<3,3> cap();
<5,5> cap();
<5,5> length(4);
<7,7> cap();
<9,9> cap();
<9,9> length(4);
@POST
group(3, 5, "_streetName");
single();
@RULES
# Ex: 33\_Grand\_View\_Rd\_East
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _xALPHA [s] _xWHITE [star s] _PostalRoad [trig s] _xWHITE [star s] _PostalRoad [s layer=("_road")] _xWHITE [star s] _xWILD [min=1 max=1 s layer=("_postdirection") match=("_cityMod" "_Direction")] @@
@PRE
<3,3> cap();
<5,5> cap();
<7,7> cap();
@RULES
# Ex: 33\_Grand\_Rd\_East
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _xALPHA [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [trig s layer=("_road")] _xWHITE [star s] _xWILD [min=1 max=1 s layer=("_postdirection") match=("_cityMod" "_Direction")] @@
@PRE
<1,1> length(3);
<3,3> cap();
<3,3> length(1);
<5,5> cap();
<7,7> cap();
<7,7> length(4);
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _Direction [trig s layer=("_direction")] _xWHITE [star s] _xALPHA [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [s layer=("_road")] @@
@PRE
<1,1> length(4);
<3,3> cap();
<3,3> length(6);
<5,5> cap();
<5,5> length(5);
<7,7> cap();
<7,7> length(6);
@POST
group(3, 5, "_streetName");
single();
@RULES
_addressLine <- 1015 [s layer=("_streetNumber")] _xWHITE [star s] Marlin [s] _xWHITE [star s] _xWILD [min=1 max=1 s match=("_PostalRoad" "Lakes")] _xWHITE [star s] _xWILD [min=1 max=1 s layer=("_road") match=("_PostalRoad" "Circle")] @@
@PRE
<3,3> cap();
<5,5> cap();
<8,8> cap();
<8,8> length(5);
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _xALPHA [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [trig s layer=("_road")] \. [s] _xWHITE [star s] _xWILD [min=1 max=1 s layer=("_postdirection") match=("_cityMod" "_Direction")] @@
@PRE
<1,1> length(3);
<5,5> cap();
<5,5> length(4);
@RULES
_addressLine <- 260 [s layer=("_streetNumber")] _xWHITE [star s] _xWILD [min=1 max=1 s layer=("_streetName") match=("_city" "_Caps" "_cityPhrase")] _xWHITE [star s] _xWILD [min=1 max=1 s layer=("_road") match=("_PostalRoad" "Blvd")] @@
@PRE
<1,1> length(2);
<3,3> cap();
<3,3> length(4);
<6,6> cap();
<6,6> length(2);
@RULES
# Ex: 45\_Worm\_Apple\_Rd.
_addressLine <- 45 [s layer=("_streetNumber")] _xWHITE [star s] Worm [s] _xWHITE [star s] _xWILD [min=1 max=1 s match=("_Caps" "_hardware")] _xWILD [min=1 max=1 s layer=("_road") match=("_PostalRoad" "Rd")] \. [s] @@
@PRE
<1,1> length(3);
<3,3> cap();
<3,3> length(1);
<5,5> cap();
<7,7> cap();
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _Direction [trig s layer=("_direction")] _xWHITE [star s] _xALPHA [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [s layer=("_road")] \. [s] @@
@PRE
<1,1> length(3);
<3,3> cap();
<3,3> length(3);
<5,5> cap();
<5,5> length(4);
<7,7> cap();
<7,7> length(4);
@POST
group(3, 5, "_streetName");
single();
@RULES
_addressLine <- 111 [s layer=("_streetNumber")] _xWHITE [star s] Ink [s] _xWHITE [star s] Spot [s] _xWHITE [star s] _xWILD [min=1 max=1 s layer=("_road") match=("_PostalRoad" "Blvd")] \. [s] @@
@PRE
<3,3> cap();
<7,7> cap();
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _Direction [trig s layer=("_direction")] _xWHITE [star s] _ordinal [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [s layer=("_road")] \. [s] @@
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _Direction [trig s layer=("_direction")] _xWHITE [star s] _ordinal [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [s layer=("_road")] @@
@PRE
<3,3> cap();
<5,5> cap();
@RULES
# Ex: 18\_Peters\_Street
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _xALPHA [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [trig s layer=("_road")] @@
@PRE
<3,3> cap();
<3,3> length(8);
<5,5> cap();
<5,5> length(2);
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _xALPHA [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [trig s layer=("_road")] \. [s] @@
@PRE
<3,3> cap();
<3,3> length(1);
<5,5> cap();
<5,5> length(1);
<10,10> cap();
@POST
group(3, 6, "_direction");
single();
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _Direction [trig s] \. [s] _Direction [s] \. [s] _xWHITE [star s] _ordinal [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [s layer=("_road")] \. [s] @@
@PRE
<5,5> cap();
<7,7> cap();
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _ordinal [trig s layer=("_streetName")] _xWHITE [star s] _PostalRoad [s layer=("_road")] _xWHITE [star s] _Direction [s layer=("_postdirection")] @@
@PRE
<5,5> cap();
<5,5> length(2);
<8,8> cap();
<8,8> length(1);
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _ordinal [trig s layer=("_streetName")] _xWHITE [star s] _PostalRoad [s layer=("_road")] \. [s] _xWHITE [star s] _Direction [s layer=("_postdirection")] \. [s] @@
@PRE
<3,3> cap();
<3,3> length(1);
<6,6> cap();
<8,8> cap();
@RULES
# Ex: 22\_W.\_Grinch\_Road
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _Direction [trig s layer=("_direction")] \. [s] _xWHITE [star s] _xALPHA [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [s layer=("_road")] @@
@PRE
<3,3> cap();
<3,3> length(1);
<6,6> cap();
<6,6> length(6);
<8,8> cap();
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _Direction [trig s layer=("_direction")] \. [s] _xWHITE [star s] _xALPHA [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [s layer=("_road")] \. [s] @@
@PRE
<3,3> cap();
<3,3> length(1);
<8,8> cap();
@RULES
_addressLine <- _xNUM [s layer=("_streetNumber")] _xWHITE [star s] _Direction [trig s layer=("_direction")] \. [s] _xWHITE [star s] _ordinal [s layer=("_streetName")] _xWHITE [star s] _PostalRoad [s layer=("_road")] @@
@RULES
# Ex: P.O.\_Box\_3
_addressLine <- _xWILD [min=1 max=1 s match=("_addressLine" "_poBoxNumber")] @@
|
@PATH _ROOT _textZone _LINE _conjugation
# Delete whitespace nodes inside a conjugation zone.
# Also flags the enclosing _LINE (path elt 3).
# NOTE(review): semantics of the "up" flag unconfirmed from here.
@POST
X("up",3) = 1;
excise(1,1);
noop();
@RULES
_xNIL <-
_xWHITE [s] ### (1)
@@
|
# Tracking in the line and globally.
# Should count tabs also.
@PATH _ROOT _LINE
@POST
# S("bullet") = X("bullet") = "*"; # Commented out. #
if (X("nindent") == 3)
++X("Netscape * bullet");
# Track max and min bullet indent.
if (!G("* bullets")) # No * bullets seen yet.
G("* bullet max") = G("* bullet min") = X("nindent");
if (X("nindent") > G("* bullet max"))
G("* bullet max") = X("nindent");
if (X("nindent") < G("* bullet min"))
G("* bullet min") = X("nindent");
# Globally track number of bullets.
++G("* bullets");
++G("Netscape * bullets");
# modified 01.28.2k - PS:
# single()
noop();
@RULES
# modified 01.28.2k - PS: was bleeding degree phrases
#_bullet [base] <-
_xNIL <-
_xSTART
_whtINDENT [s star]
\* [s] # The bullet
_xWILD [s plus match=( _xWHITE _whtSEP)]
_xWILD [s one lookahead fail=( \* )]
@@
# Track "o"-style bullets ("  o Item ...") at line start.
# Mirrors the "*", "+", and "-" bullet trackers in this pass.
@PRE
<3,3> lowercase();
@POST
# S("bullet") = X("bullet") = "o"; # Commented out. #
# Netscape-converted resumes indent "o" bullets to column 8.
if (X("nindent") == 8)
++X("Netscape o bullet");
# Track max and min bullet indent.
if (!G("o bullets")) # No o bullets seen yet.
G("o bullet max") = G("o bullet min") = X("nindent");
if (X("nindent") > G("o bullet max")) # FIX: was G("* bullet max") -- copy-paste from the "*" rule.
G("o bullet max") = X("nindent");
if (X("nindent") < G("o bullet min"))
G("o bullet min") = X("nindent");
# Globally track number of bullets.
++G("o bullets");
++G("Netscape o bullets");
# modified 01.28.2k - PS:
# single()
noop();
@RULES
# modified 01.28.2k - PS: was bleeding degree phrases
#_bullet [base] <-
_xNIL <-
_xSTART
_whtINDENT [s star]
o [s] # The bullet.
_xWILD [s plus match=( _xWHITE _whtSEP)]
@@
@POST
# S("bullet") = X("bullet") = "+"; # Commented out. #
if (X("nindent") == 13)
++X("Netscape + bullet");
# Track max and min bullet indent.
if (!G("+ bullets")) # No + bullets seen yet.
G("+ bullet max") = G("+ bullet min") = X("nindent");
if (X("nindent") > G("+ bullet max"))
G("+ bullet max") = X("nindent");
if (X("nindent") < G("+ bullet min"))
G("+ bullet min") = X("nindent");
# Globally track number of bullets.
++G("+ bullets");
++G("Netscape + bullets");
single();
@RULES
_bullet [base] <-
_xSTART
_whtINDENT [s star]
\+ [s] # The bullet
_xWILD [s plus match=( _xWHITE _whtSEP)]
@@
# Dashes are ambiguous. (They could start widow lines.)
# So we'll wait and see before committing.
@POST
X("bullet") = "-"; # Tentatively mark the line as dash-bulleted.
# Track max and min bullet indent.
if (!G("- bullets")) # No - bullets seen yet.
G("- bullet max") = G("- bullet min") = X("nindent");
if (X("nindent") > G("- bullet max"))
G("- bullet max") = X("nindent");
if (X("nindent") < G("- bullet min"))
G("- bullet min") = X("nindent");
# Globally track number of bullets.
++G("- bullets");
# noop()
@RULES
_xNIL <-
_xSTART
_whtINDENT [s star]
\- [s] # The bullet
_xWILD [s plus match=( _xWHITE _whtSEP)]
@@
# Throwing some other stuff here for now. #
# Flag a line as an HTML converted turd.
@POST
++X("html turd");
# noop()
@RULES
_xNIL <- \[ [s] _xWILD \] [s] @@
|
# note: The funny looking stuff here matches all the zones but
# only reduces the FIRST zone found to a contactZone. Just a trick
# for singling out the first item in a list.
# (may be working just because a single REZZONE is found in the list ;-)
# @NODES _ROOT
@PATH _ROOT
@POST
singler(2,2); # Reduce only the _REZZONE (elt 2), not _xSTART.
@RULES
# note: I believe rules fail with wildcard at start or end of rule.
# Will fix after Tuesday demo for PM.
#_contactZone <- _REZZONE _xWILD _xEND @@
#_contactZone <- _xSTART _REZZONE _xWILD _xANY @@
_contactZone [unsealed] <- _xSTART _REZZONE @@
|
@NODES _ROOT
# Delete blank-line nodes from the parse tree.
# (Legacy action-list syntax: no semicolon needed.)
@POST
excise(1,1)
@RULES
_xNIL <-
_BLANKLINE [s] ### (1)
@@
|
@CODE
# Pass cleanup: close the study output file if it was opened.
L("hello") = 0; # NOTE(review): dummy statement; appears to be a placeholder.
if (G("studyout"))
closefile(G("studyout"));
@@CODE
@NODES _sent
@CHECK
# Agreement...
L("arr") = vgagree(N(1),N(3),N(5),N(7),N(9));
if (!L("arr")[0])
fail();
@POST
L("m") = N(1);
L("h") = N(3);
L("b") = N(5);
L("being") = N(7);
L("v") = N(9);
L("neg") = mhbvadv(2,4,6,8);
if (N(7))
{
N("sem",7) = N("stem",7) = "be";
chpos(N(7),"VBG");
}
# Should be a more compact way to do the below...
# At least group could return the grouped node.
if (N(1))
{
group(1,9,"_vg");
mhbv(N(1),L("neg"),L("m"),L("h"),L("b"),L("being"),L("v"));
}
else if (N(3))
{
group(3,9,"_vg");
mhbv(N(3),L("neg"),L("m"),L("h"),L("b"),L("being"),L("v"));
}
else if (N(5))
{
group(5,9,"_vg");
mhbv(N(5),L("neg"),L("m"),L("h"),L("b"),L("being"),L("v"));
}
else if (N(7))
{
group(7,9,"_vg");
mhbv(N(7),L("neg"),L("m"),L("h"),L("b"),L("being"),L("v"));
}
else
{
group(9,9,"_vg");
mhbv(N(9),L("neg"),L("m"),L("h"),L("b"),L("being"),L("v"));
}
@RULES
_xNIL <-
_xWILD [s opt match=(_modal _do)]
_xWILD [star match=(_adv _advl)]
_have [s opt]
_xWILD [star match=(_adv _advl)]
_be [s opt]
_xWILD [star match=(_adv _advl)]
being [s opt]
_xWILD [star match=(_adv _advl)]
_xWILD [s one match=(_verb) except=(_modal _have _be _vg)]
@@
####### MHB ALPHA
@CHECK
if (!N("verb",9))
fail();
if (!N(1) && !N(3) && !N(5) && !N(7)) # 06/15/06 AM.
fail();
# Agreement...
L("arr") = vgagree(N(1),N(3),N(5),N(7),N(9));
if (!L("arr")[0])
fail();
@POST
L("tmp9") = N(9);
group(9,9,"_verb");
pncopyvars(L("tmp9"),N(9));
L("m") = N(1);
L("h") = N(3);
L("b") = N(5);
L("being") = N(7);
L("v") = N(9);
if (N(7))
{
N("sem",7) = N("stem",7) = "be";
chpos(N(7),"VBG");
}
L("neg") = mhbvadv(2,4,6,8);
group(1,9,"_vg");
mhbv(N(1),L("neg"),L("m"),L("h"),L("b"),L("being"),L("v"));
@RULES
_xNIL <-
_xWILD [s opt match=(_modal _do)]
_xWILD [star match=(_adv _advl)]
_have [s opt]
_xWILD [star match=(_adv _advl)]
_be [s opt]
_xWILD [star match=(_adv _advl)]
being [s opt]
_xWILD [star match=(_adv _advl)]
_xALPHA
@@
@CHECK
if (!N("verb",7))
fail();
# Agreement...
L("arr") = vgagree(0,N(1),N(3),N(5),N(7));
if (!L("arr")[0])
fail();
@POST
L("tmp7") = N(7);
group(7,7,"_verb");
pncopyvars(L("tmp7"),N(7));
L("m") = 0;
L("h") = N(1);
L("b") = N(3);
L("being") = N(5);
L("v") = N(7);
if (N(5))
{
N("sem",5) = N("stem",5) = "be";
chpos(N(5),"VBG");
}
L("neg") = mhbvadv(2,4,6,0);
group(1,7,"_vg");
mhbv(N(1),L("neg"),L("m"),L("h"),L("b"),L("being"),L("v"));
@RULES
_xNIL <-
_have [s]
_xWILD [star match=(_adv _advl)]
_be [s opt]
_xWILD [star match=(_adv _advl)]
being [s opt]
_xWILD [star match=(_adv _advl)]
_xALPHA
@@
@CHECK
if (!N("verb",5))
fail();
# Agreement...
L("arr") = vgagree(0,0,N(1),N(3),N(5));
if (!L("arr")[0])
fail();
@POST
L("tmp5") = N(5);
group(5,5,"_verb");
pncopyvars(L("tmp5"),N(5));
L("m") = 0;
L("h") = 0;
L("b") = N(1);
L("being") = N(3);
L("v") = N(5);
if (N(3))
{
N("sem",3) = N("stem",3) = "be";
chpos(N(3),"VBG");
}
L("neg") = mhbvadv(2,4,0,0);
group(1,5,"_vg");
mhbv(N(1),L("neg"),L("m"),L("h"),L("b"),L("being"),L("v"));
@RULES
_xNIL <-
_be [s]
_xWILD [star match=(_adv _advl)]
being [s opt]
_xWILD [star match=(_adv _advl)]
_xALPHA
@@
##### VERBLESS
@CHECK
# Agreement...
L("arr") = vgagree(N(1),N(3),N(5),N(7),0);
# "err.txt" << L("arr") << phrasetext() << "\n";
if (!L("arr")[0])
fail();
S("voice") = "active";
@POST
L("m") = N(1);
L("h") = N(3);
L("b") = N(5);
L("being") = N(7);
if (N(7))
{
N("sem",7) = N("stem",7) = "be";
chpos(N(7),"VBG");
}
L("neg") = mhbvadv(2,4,6,0);
group(1,7,"_vg");
mhbv(N(1),L("neg"),L("m"),L("h"),L("b"),L("being"),0);
N("voice",1) = S("voice");
@RULES
_xNIL <-
_xWILD [s one match=(_modal _do) except=(_vg _np)]
_xWILD [star match=(_adv _advl)]
_have [s opt]
_xWILD [star match=(_adv _advl)]
_be [s opt]
_xWILD [star match=(_adv _advl)]
being [s opt]
@@
@CHECK
# Agreement...
L("arr") = vgagree(0,N(1),N(3),N(5),0);
S("verb node") = N(1);
if (!L("arr")[0])
fail();
# S("voice") = "active";
@POST
L("h") = N(1);
L("b") = N(3);
L("being") = N(5);
if (N(5))
{
N("sem",5) = N("stem",5) = "be";
chpos(N(5),"VBG");
}
L("neg") = mhbvadv(2,4,0,0);
if (N(5))
group(1,5,"_vg");
else if (N(3))
group(1,3,"_vg");
else
group(1,1,"_vg");
mhbv(N(1),L("neg"),0,L("h"),L("b"),L("being"),0);
@RULES
_xNIL <-
_xWILD [s one match=(_have) except=(_vg)]
_xWILD [star match=(_adv _advl)]
_be [s opt]
_xWILD [star match=(_adv _advl)]
being [s opt]
@@
@CHECK
# Agreement...
L("arr") = vgagree(0,0,N(1),N(3),0);
if (!L("arr")[0])
fail();
S("voice") = "active";
if (N(3))
{
N("sem",3) = N("stem",3) = "be";
chpos(N(3),"VBG");
}
@RULES
_vg <-
_be [s]
_xWILD [star match=(_adv _advl)]
being [s]
@@
# be
# Lone "be" form (not already inside a _vg), with optional adverbs,
# not followed by an alpha or verb: make it a verb group by itself.
@POST
L("tmp1") = N(1);
L("neg") = mhbvadv(2,0,0,0); # Collect negation/adverb info from elt 2.
group(1,1,"_vg");
mhbv(N(1),L("neg"),0,0,L("tmp1"),0,0); # "be" fills the b slot.
pncopyvars(L("tmp1"),N(1));
N("voice",1) = "active";
clearpos(N(1),1,0);
@RULES
_xNIL <-
_be [s except=(_vg)]
_adv [star]
_xWILD [one fail=(_xALPHA _verb)]
@@
_xNIL <-
_be [s except=(_vg)]
_adv [star]
_xEND
@@
# noun alpha by
@CHECK
if (!N("verb",2))
fail();
if (!vconjq(N(2),"-en"))
fail();
@POST
L("tmp2") = N(2);
group(2,2,"_verb");
L("vb") = N(2);
pncopyvars(L("tmp2"),N(2));
group(2,2,"_vg");
pncopyvars(L("tmp2"),N(2));
N("voice",2) = "passive";
N("verb node",2) = L("vb");
clearpos(N(2),1,0); # Zero out token info.
N("ellipted-rel",2) = 1;
@RULES
_xNIL <-
_noun
_xALPHA
_prep [lookahead]
@@
# to alpha
# Infinitive: "to" + ambiguous alpha that has verb potential and an
# infinitive conjugation, followed by a phrase-boundary token.
# Retype the alpha as an active infinitive verb group.
@CHECK
if (!N("verb",2))
fail();
if (!vconjq(N(2),"inf"))
fail();
@POST
L("tmp") = N(2);
group(2,2,"_verb");
pncopyvars(L("tmp"),N(2));
chpos(N(2),"VB"); # Infinitive.
group(2,2,"_vg");
pncopyvars(L("tmp"),N(2));
N("voice",2) = "active";
clearpos(N(2),1,0); # Zero out token info.
@RULES
_xNIL <-
to [s]
_xALPHA
_xWILD [lookahead one match=(_conj _prep _fnword
_det _quan _num _xNUM _adj _noun \,)]
@@
# noun alpha noun
# to alpha noun
# prep alpha noun
# "to" + adverbial* + infinitive alpha + adverbial*, with lookahead at
# noun-phrase or clause material.  Builds an active-voice verb group.
@CHECK
if (!N("verb",3))
fail();
if (!vconjq(N(3),"inf"))
fail();
@POST
L("tmp3") = N(3);
group(3,3,"_verb");
L("v") = N(3);
pncopyvars(L("tmp3"),N(3));
fixverb(N(3),"active","VB");
# Gather negation/adverbial info from element 2.
L("neg") = mhbvadv(2,0,0,0);
group(3,3,"_vg");
mhbv(N(3),L("neg"),0,0,0,0,L("v"));
pncopyvars(L("tmp3"),N(3));
N("voice",3) = "active";
clearpos(N(3),1,0); # Zero out token info.
@RULES
#_xNIL <-
# _xWILD [one match=(_noun _np _pro)]
# _xWILD [star match=(_adv _advl)]
# _xALPHA
# _xWILD [star match=(_adv _advl)]
# _xWILD [one lookahead match=(_noun _np _pro _det _prep _conj)]
# @@
# to alpha
_xNIL <-
_xWILD [s one match=(to)]
_xWILD [star match=(_adv _advl)]
_xALPHA
_xWILD [star match=(_adv _advl)]
_xWILD [one lookahead match=(_noun _np _pro _det _prep _conj
_fnword _whword \, )]
@@
# to alpha
# Fallback: "to" + adverbial* + infinitive alpha with no lookahead
# constraint.  Builds the verb group and fixes it up as active/VB.
@CHECK
if (!N("verb",3))
fail();
if (!vconjq(N(3),"inf"))
fail();
@POST
L("tmp3") = N(3);
group(3,3,"_verb");
pncopyvars(L("tmp3"),N(3));
L("v") = N(3);
L("neg") = mhbvadv(2,0,0,0);
group(3,3,"_vg");
mhbv(N(3),L("neg"),0,0,0,0,L("v"));
pncopyvars(L("tmp3"),N(3));
N("verb node",3) = L("v");
fixvg(N(3),"active","VB");
clearpos(N(3),1,0);
@RULES
_xNIL <-
_xWILD [s one match=(to)]
_xWILD [star match=(_adv _advl)]
_xALPHA
@@
|
# Internal rule-file parsing pass: gather a run of _RULE nodes into a
# _RULES region node (rfarules = reduce-for-analyzer rules action).
@PATH _ROOT _RULES
@POST
rfarules(1)
single()
@RULES
_RULES [base] <- _RULE [plus trig] @@
|
# Match a bracketed, starred patient-ID marker: [**...**]
@NODES _ROOT
@RULES
_patientID <-
\[ ### (1)
\* ### (2)
\* ### (3)
_xWILD [fails=(\*)] ### (4)
\* ### (5)
\* ### (6)
\] ### (7)
@@
# Clock time: HH:MM[:SS][ am|pm]
@RULES
_time <-
_xNUM
\:
_xNUM
\: [opt]
_xNUM [opt]
\_ [opt]
_xWILD [opt match=(am pm AM PM)]
@@
# Initialism of the dotted form X.Y[.Z[.]] -- excise the periods
# (elements 6, 4, 2, removed highest-first so indices stay valid)
# and reduce the remainder to _init.
@POST
excise(6,6);
excise(4,4);
excise(2,2);
single();
# Initialism
@RULES
_init <-
_xWILD [one matches=(_xNUM _xALPHA)] ### (1)
\. ### (2)
_xWILD [one matches=(_xNUM _xALPHA)] ### (3)
\. ### (4)
_xWILD [opt matches=(_xNUM _xALPHA)] ### (5)
\. [opt] ### (6)
@@
# Known abbreviations ("Dr.", "etc.", ...): drop the period and reduce
# only the word itself (element 1) to _init, leaving the delimiter.
@POST
excise(2,2);
singler(1,1);
@RULES
_init <-
# Add abbreviations here, in the form dr., jr., etc
_xWILD [one match=(Dr DR dr q etc)]
\.
_xWILD [one matches=(\_ \, \:)]
@@
# Remove attending clinician line.
# All matched elements are excised, so nothing remains to reduce.
# NOTE(review): the _init target label looks like a copy-paste leftover
# from the abbreviation rules above -- confirm.
@POST
excise(1,4);
@RULES
_init <-
_xWILD [one match=(Attending attending)] ### (1)
\: ### (2)
_xWHITE [opt] ### (3)
_patientID [opt] ### (4)
@@
# Remove stop words
# Any node whose "s" variable equals "stop" is logged and excised.
@PRE
<1,1> vareq("s", "stop");
@POST
"stop_words.txt" << N("$text", 1) << "\n";
excise(1,1);
@RULES
_xNIL <-
_xANY
@@
# Remove stop words which have been reduced to _stop
# Log the excised text for audit, then delete the node.
@POST
"stop_words.txt" << N("$text", 1) << "\n";
excise(1,1);
@RULES
_xNIL <-
_stop
@@
# Internal rule-file parsing: wrap rule arguments, then parenthesized
# lists of arguments.  The @RECURSE...@@RECURSE mini-grammar is applied
# to the contents of the _xWILD below via recurse=(listarg).
@PATH _ROOT _RULES
@RECURSE listarg
@POST
rfaarg(1)
single()
@RULES
_ARG [base] <- _NONLIT @@
_ARG [base] <- _LIT @@
_ARG [base] <- _STR @@
_ARG [base] <- _NUM @@
@@RECURSE listarg
@POST
rfalist(2)
single()
@RULES
_LIST [base] <- \( _xWILD [match=(_LIT _NONLIT _STR _NUM) recurse=(listarg)] \) @@
|
# Delete _break nodes anywhere under _ROOT or _section.
@MULTI _ROOT _section
@POST
excise(1,1);
@RULES
_xNIL <-
_break
@@
|
# Write the tree text of every top-level node to input/X/<head>.<tail>,
# one node per line.
@CODE
L("node") = pndown(pnroot());
L("filename") = G("$apppath") + "\\input\\X\\" + G("$inputhead") + "." + G("$inputtail");
L("file") = openfile(L("filename"));
# L("debugname") = G("$inputpath") + "debug.txt";
# L("debug") = openfile(L("debugname"),"app");
# L("debug") << L("filename") << "\n";
# closefile(L("debug"));
while (L("node")) {
L("file") << pnvar(L("node"),"$treetext") << "\n";
L("node") = pnnext(L("node"));
}
closefile(L("file"));
@@CODE
@DECL
#############
# PRINTRULE
#############
# Emit a "_clause <- ... @@" grammar rule to G("rout") describing the
# children of the given clause node, one rule element per child.
# clause: parse-tree node for a clause; no-op if null or childless.
printrule(L("clause"))
{
if (!L("clause"))
return;
L("n") = pndown(L("clause"));
if (!L("n"))
return;
# Assume we have a good clause.
G("rout") << "\n_clause <-";
# Traverse.
L("a flag") = 0; # Flag if working on adverbial.
while (L("n"))
{
L("a flag") = printruleelt(L("n"),L("a flag"),G("rout"));
L("n") = pnnext(L("n"));
}
G("rout") << " @@";
}
#############
# PRINTRULEELTS
#############
# Print rule elements for the sibling nodes from start through end
# (inclusive), threading the adverbial-collapse flag through each call.
# Returns the updated flag.
printruleelts(
L("start"), # 1st node.
L("end"), # last node.
L("a flag"), # If last was an adverbial.
L("rout") # Stream to print to.
)
{
if (!L("start") || !L("rout"))
return L("a flag");
# Advance end one past the last node so the loop is exclusive-bounded.
if (L("end"))
L("end") = pnnext(L("end"));
while (L("start") && L("start") != L("end"))
{
L("a flag") = printruleelt(L("start"),L("a flag"),L("rout"));
L("start") = pnnext(L("start"));
}
return L("a flag");
}
#############
# PRINTRULEELT
#############
# Print one rule element for node n.  Consecutive adverbials are
# collapsed to a single "_advl"; nonliterals print their name; digits
# print "num"; alphabetics print their POS tag (NN/VB/JJ/RB families
# shortened to two chars) or "alpha" if untagged; punctuation is
# backslash-escaped.  Returns the updated adverbial flag.
printruleelt(
L("n"), # Current node.
L("a flag"), # If last was an adverbial.
L("rout") # Stream to print to.
)
{
if (!L("n") || !L("rout"))
return L("a flag");
L("name") = pnname(L("n"));
if (L("name") == "_adv" || L("name") == "_advl")
{
if (!L("a flag")) # First adverbial in group.
{
L("rout") << " ";
L("a flag") = 1;
# G("rout") << "_xWILD [star match=(_adv _advl)]";
L("rout") << "_advl"; # For readability...
}
}
else
{
L("rout") << " ";
L("a flag") = 0; # Reset.
L("ch") = strpiece(L("name"),0,0); # First char.
if (L("ch") == "_") # nonliteral.
L("rout") << L("name");
# else if (strisdigit(L("ch")) || strisalpha(L("ch")))
# L("rout") << L("name");
else if (strisdigit(L("ch")))
L("rout") << "num";
else if (strisalpha(L("ch")))
{
# Prefer the node's first recorded POS tag when available.
if (pnvar(L("n"),"posarr len"))
{
L("arr") = pnvar(L("n"),"posarr");
L("pos") = L("arr")[0];
L("pos2") = strpiece(L("pos"),0,1);
if (L("pos2") == "NN"
|| L("pos2") == "VB"
|| L("pos2") == "JJ"
|| L("pos2") == "RB")
L("rout") << L("pos2");
else
L("rout") << L("pos");
}
else
L("rout") << "alpha";
}
else # punct
L("rout") << "\\" << L("name");
}
return L("a flag");
}
#############
# PRINTRULEALPHAS
#############
# Emit " alpha" to the stream once per count; no-op on zero/null args.
printrulealphas(L("num"),L("strm"))
{
if (!L("num") || !L("strm"))
return;
while (L("num") > 0)
{
L("strm") << " " << "alpha";
L("num") = L("num") - 1;
}
}
@CODE
# Only emit the generated rule file in verbose mode.
if (!G("verbose"))
exitpass();
# Stream target: a filename string acts as an output stream in NLP++.
G("rout") = "rule.txt";
G("rout") << "\n# Automatically gen'd rule file." << "\n";
G("rout") << "@PATH _ROOT _TEXTZONE _sent" << "\n";
# Keep it simple for now, with one rule region.
G("rout") << "\n@RULES\n";
@@CODE
#@PATH _ROOT _TEXTZONE _sent _clause
#@PATH _ROOT _TEXTZONE _sent # Comment. #
@NODES _sent # 07/13/12 AM.
# Every time I see a clause with a verb group,
#traverse it and print out a grammar rule for it.
#@POST
# printrule(X(4)); # Supply the clause node.
#@RULES
#_xNIL <-
# _xANY [plus]
# _vg [trigger]
# _xANY [plus]
# @@
# Current form: print a rule for every clause under a sentence.
@POST
printrule(N(1));
@RULES
_xNIL <-
_clause
@@
|
# Double-quoted string literal; the captured interior has any single
# quotes escaped (as " \' ") into S("text").
@NODES _ROOT
@POST
if (N("$text",2))
S("text") = strsubst(N("$text",2),"'"," \\\' ");
single();
@RULES
_string <-
\" ### (1)
_xWILD [fail=(\")] ### (2)
\" ### (3)
@@
|
# Record the text preceding a _codes node, lowercased and trimmed, as
# the line's "country" variable (X scope level 2).
@NODES _LINE
@POST
X("country",2) = strtolower(strtrim(N("$text",1)));
@RULES
_xNIL <-
_xWILD [fail=(_codes)]
_codes ### (1)
@@
|
@PATH _ROOT _LINE _Caps _Caps
# Job title root word (eg, "programmer") at the end of phrase
# gets a bonus.
@POST
++X("jobtitleroots"); # Bump in _Caps context node.
if (N("$end")) # If last node in caps phrase.
++X("end jobtitleroot"); # Bonus. Last word in cap phrase.
# noop() # Implicit.
@RULES
_xNIL <- _jobTitleRoot [s] @@
_xNIL <- _jobPhrase [s] @@ # 12/26/99 AM.
_xNIL <- _jobTitle [s] @@ # 01/01/00 AM.
# Same counting scheme for job modifiers.
@POST
++X("jobmods"); # Bump in _Caps context node.
if (N("$end")) # If last node in caps phrase.
++X("end jobmod"); # Bonus. Last word in cap phrase.
# noop() # Implicit.
@RULES
_xNIL <- _jobMod [s] @@
|
# XML DTD declaration separator: a parameter-entity reference or
# whitespace (per the XML spec's DeclSep production).
@NODES _ROOT
@RULES
_DeclSep <-
_PEReference [one] ### (1)
@@
_DeclSep <-
_whiteSpace [one] ### (1)
@@
@@RULES
|
# School-name coordination patterns: "X and/& Y" pairs that name a
# single institution, plus "A&M"/"A&T" style abbreviations.
@NODES _LINE
@RULES
# College and Univertity, Institute & Seminary
_SchoolRoot [] <- _SchoolRoot _xWHITE
_xWILD [s one match = (and And \&)] _xWHITE _SchoolRoot @@
# A&M
_SchoolName [] <- A [s] \& M [s] @@
# A&T
_SchoolName [] <- A [s] \& [s] T [s]@@
# Agricultural and {Mechanical | Technical)
_SchoolName [] <- Agricultural [s] _xWHITE
_xWILD [s one match = (and And \&)]_xWHITE
_xWILD [s one match = (Mechanical Technical)] @@
# {Art | Science | Mining | Technology | Design }
_SchoolName []<-
_xWILD [s one match =(Business Commerce Art Science Mining Technology Design)] _xWHITE
_xWILD [s one match = (and And \&)]_xWHITE
_xWILD [s one match =(Business Commerce Art Science Mining Technology Design)] @@
# Hobart and William Smith Colleges
_SchoolName [] <- Hobart [s]_xWHITE
_xWILD [s one match = (and And \&)]_xWHITE
William [s]_xWHITE Smith [s] @@
# Washington and Jefferson College
_SchoolName [] <- Washington [s] _xWHITE
_xWILD [s one match = (and And \&)] _xWHITE
Jefferson [s]@@
# William and Mary
_SchoolName [] <- William [s] _xWHITE
_xWILD [s one match = (and And \&)] _xWHITE
Mary [s]@@
# Bryant & Stratton
_SchoolName [] <- Bryant [s] _xWHITE
_xWILD [s one match = (and And \&)] _xWHITE
Stratton [s]@@
# Davis & Elkins College
_SchoolName [] <- Davis [s]_xWHITE
_xWILD [s one match = (and And \&)] _xWHITE
Elkins [s] @@
# Emory & Henry College
_SchoolName [] <- Emory [s] _xWHITE
_xWILD [s one match = (and And \&)] _xWHITE
Henry [s] @@
# Johnson & Wales
_SchoolName [] <- Johnson [s]_xWHITE
_xWILD [s one match = (and And \&)] _xWHITE
Wales [s]@@
# Franklin & Marshall
_SchoolName [] <- Franklin [s] _xWHITE
_xWILD [s one match = (and And \&)] _xWHITE
Marshall [s]@@
# Lewis & Clark College
_SchoolName [] <- Lewis [s] _xWHITE
_xWILD [s one match = (and And \&)] _xWHITE
Clark [s]@@
# Washington & Lee University
_SchoolName [] <- Washington [s]_xWHITE
_xWILD [s one match = (and And \&)] _xWHITE
Lee [s]@@
# Cooper Union
_SchoolName [] <- Cooper [s] _xWHITE Union [s]@@
# Fetch concept-value of attribute (must be first).
# NOTE(review): usage snippet -- conval() fetches the concept value of
# attribute L("name") on L("con"); "must be first" presumably means the
# concept value must be the attribute's first value -- confirm against
# the NLP++ knowledge-base API docs.
L("return_con") = conval(L("con"), L("name"));
@DECL

# Print the ICD-11 hierarchy path of a matched code to matches.txt,
# one ancestor per line, indented two spaces per level (the "ICD11"
# root itself is excluded).
# code: a concept whose name is an ICD-11 code found in G("icd11").
DisplayCode(L("code")) {
L("codename") = conceptname(L("code"));
L("con") = FindCodeHier(L("codename"),G("icd11"));
if (L("con")) {
# FIX: L("i") was used uninitialized (relying on the implicit 0
# default); initialize it explicitly alongside the count.
L("i") = 0;
L("count") = 0;
# Walk upward, recording the path bottom-up until the root.
while (up(L("con")) && conceptname(L("con")) != "ICD11") {
L("path")[L("i")++] = L("con");
L("count")++;
L("con") = up(L("con"));
}
# Replay the recorded path top-down with growing indentation.
L("i") = L("count") - 1;
L("indent") = 0;
while (L("i") >= 0) {
L("con") = L("path")[L("i")];
L("title") = strval(L("con"),"title");
"matches.txt" << SpacesStr(L("indent")) << conceptname(L("con")) << ": " << L("title") << "\n";
L("con") = down(L("con"));
L("indent") = L("indent") + 2;
L("i")--;
}
}
}
# Depth-first search of the concept hierarchy under con for a child
# whose name equals code.  Returns the matching concept, or nothing
# (implicit 0) when no match exists.
FindCodeHier(L("code"),L("con")) {
L("child") = down(L("con"));
while (L("child")) {
# "debug.txt" << conceptname(L("child")) << "\n";
if (conceptname(L("child")) == L("code")) {
return L("child");
}
# Recurse into subtrees before moving to the next sibling.
if (down(L("child"))) {
L("found") = FindCodeHier(L("code"),L("child"));
if (L("found"))
return L("found");
}
L("child") = next(L("child"));
}
}
@@DECL
# Wiki-style header line: ==...text...== .  Records the header text and
# level (number of '=' signs) on the enclosing line node and logs it.
@PATH _ROOT _textZone _LINE
@POST
S("header") = N("$text",2);
X("header") = N("$text",2);
X("up") = 1;
# Level = count of leading '=' characters.
X("level") = strlength(N("$text",1));
"header.txt" << N("$text",2) << "\n";
single();
@RULES
_header <-
_xWILD [min=2 match=(\=)] ### (1)
_xWILD [plus fail=(\=)] ### (2)
_xWILD [min=2 match=(\=)] ### (3)
@@
|
# Remove the two _adjs nodes from the parse tree, merging their children under a new _adjs node
@POST
merger(1,2);
@RULES
_adjs <- _adjs _adjs _xyz @@ |
# Dump word frequencies from the two-level alphabetic dictionary
# hierarchy to the trend file, right-justified counts first.
@CODE
G("trend file") << "TRENDS: RAW ALPHABETIC FREQUENCY" << "\n";
G("trend file") << "================================" << "\n";
# TRAVERSE DICTIONARY HIERARCHY. (Has 2 levels of dictionary indices.)
G("dict level 1") = down(G("dict alpha")); # Get level 1.
while (G("dict level 1")) # Traverse level 1.
{
G("dict level 2") = down(G("dict level 1")); # Get level 2.
while (G("dict level 2")) # Traverse level 2.
{
G("dict word") = down(G("dict level 2")); # Get word concepts.
while (G("dict word"))
{
# Print out words and their frequencies.
G("freq") = numval(G("dict word"), "frequency");
if (G("freq") > 0)
G("trend file")
<< rightjustifynum(G("freq"),5)
<< " "
<< LJ(conceptname(G("dict word")),12)
<< "\n";
G("dict word") = next(G("dict word")); # Get next word.
}
G("dict level 2") = next(G("dict level 2")); # Next level 2 index.
}
G("dict level 1") = next(G("dict level 1")); # Next level 1 index.
}
@@CODE
# Resume section headers.  First: whole-line header phrases/words on
# short lines (< 5 blobs).  Then: a header word at line start followed
# by an optional colon, one rule family per section type.
@NODES _LINE
# Some phrases. #
#@PRE
#<1,1> cap()
#<3,3> cap()
@CHECK
if (X("nblobs") >= 5) fail();
# Xlt("nblobs", 5)
@RULES
# Should have a confidence bonus if the header covers the entire line.
#_contactHeader <- Personal [s] _xWHITE [s star] Information [s] @@
_contactHeader <-
_xWILD [s one match=(_ContactHeaderPhrase _ContactHeaderWord)] @@
_educationHeader <-
_xWILD [s one match=(_EducationHeaderPhrase _EducationHeaderWord)] @@
_experienceHeader <-
_xWILD [s one match=(_ExperienceHeaderPhrase _ExperienceHeaderWord)] @@
_objectiveHeader <-
_xWILD [s one match=(_ObjectiveHeaderPhrase _ObjectiveHeaderWord)] @@
_skillsHeader <-
_xWILD [s one match=(_SkillsHeaderPhrase _SkillsHeaderWord)] @@
_referencesHeader <-
_xWILD [s one match=(_ReferencesHeaderPhrase _ReferencesHeaderWord)] @@
_presentationsHeader <-
_xWILD [s one match=(_PresentationsHeaderPhrase _PresentationsHeaderWord)] @@
_publicationsHeader <-
_xWILD [s one match=(_PublicationsHeaderPhrase _PublicationsHeaderWord)] @@
_otherHeader <-
_xWILD [s one match=(_OtherHeaderPhrase _OtherHeaderWord)] @@
# Line-initial capitalized header word, optionally preceded by
# whitespace/indent, optionally followed by a colon.
@PRE
<3,3> cap();
@RULES
_contactHeader <-
_xSTART
_xWILD [s star match=( _xWHITE _whtINDENT)]
_xWHITE [s star]
_ContactHeaderWord [s t]
\: [s opt]
@@
_objectiveHeader <-
_xSTART
_xWILD [s star match=( _xWHITE _whtINDENT)]
_xWHITE [s star]
_ObjectiveHeaderWord [s t]
\: [s opt]
@@
_experienceHeader <-
_xSTART
_xWILD [s star match=( _xWHITE _whtINDENT)]
_xWHITE [s star]
_ExperienceHeaderWord [s t]
\: [s opt]
@@
_educationHeader <-
_xSTART
_xWILD [s star match=( _xWHITE _whtINDENT)]
_xWHITE [s star]
_EducationHeaderWord [s t]
\: [s opt]
@@
_skillsHeader <-
_xSTART
_xWILD [s star match=( _xWHITE _whtINDENT)]
_xWHITE [s star]
_SkillsHeaderWord [s t]
\: [s opt]
@@
_referencesHeader <-
_xSTART
_xWILD [s star match=( _xWHITE _whtINDENT)]
_xWHITE [s star]
_ReferencesHeaderWord [s t]
\: [s opt]
@@
_presentationsHeader <-
_xSTART
_xWILD [s star match=( _xWHITE _whtINDENT)]
_xWHITE [s star]
_PresentationsHeaderWord [s t]
\: [s opt]
@@
_publicationsHeader <-
_xSTART
_xWILD [s star match=( _xWHITE _whtINDENT)]
_xWHITE [s star]
_PublicationsHeaderWord [s t]
\: [s opt]
@@
_otherHeader <-
_xSTART
_xWILD [s star match=( _xWHITE _whtINDENT)]
_xWHITE [s star]
_OtherHeaderWord [s t]
\: [s opt]
@@
#_educationWord <- Education @@
#_educationWord <- Schooling @@
#_experienceWord <- Experience @@
#_experienceWord <- Projects @@
#_otherHeaderWord <- _xWILD [s one matches=(
#Affiliations
#Certificate
#Certificates
# expertise # Too specialized for the moment.
#General
#Information
#Objective
#Objectives
#Patents
#Presentations
#Publications
#References
#Skill
#Skills
# )] @@
|
# Accumulate child _iGroup "text" values onto the enclosing _hiGroup
# node and log the running concatenation.
# NOTE(review): uses N("text")/X("text"), presumably set by an earlier
# pass -- confirm (distinct from the built-in "$text").
@PATH _ROOT _headerZone _liGroup _hiGroup
@POST
X("text") = X("text") + N("text");
"hitext.txt" << X("text") << "\n";
@RULES
_xNIL <-
_iGroup
@@
|
# XML comment: start marker, any content that contains neither a
# comment terminator nor a double hyphen ("--" is illegal inside XML
# comments), then the end marker.  min=0 max=0 means unbounded.
# FIX: the fail list contained "_CommentEnd" twice; the duplicate is
# redundant and has been removed (match/fail lists are set-membership).
@NODES _ROOT
@RULES
_Comment <-
_CommentStart [one] ### (1)
_xWILD [min=0 max=0 fail=("_CommentEnd" "_DoubleHyphen")] ### (2)
_CommentEnd [one] ### (3)
@@
# XML PubidLiteral / SystemLiteral productions.  The @POST captures the
# quoted interior into S("textValue") for the rules that follow.
@POST
S("textValue") = N("$text",2) ;
single() ;
@@POST
@RULES
# Public-ID literal: restricted character set per the XML spec.
_PubidLiteral <-
\" [one] ### (1)
_xWILD [min=0 max=0 matches=( "_xALPHA" "_xNUM" \  \- \' \( \) \+ \, \. \/ \: \= \? \; \! \* \# \@ \$ \_ \% )] ### (2)
\" [one] ### (3)
@@
_PubidLiteral <-
\' [one] ### (1)
_xWILD [min=0 max=0 matches=( "_xALPHA" _xNUM \  \- \( \) \+ \, \. \/ \: \= \? \; \! \* \# \@ \$ \_ \% )] ### (2)
\' [one] ### (3)
@@
# System literal: anything up to the matching quote character.
_SystemLiteral <-
\" [one] ### (1)
_xWILD [min=0 max=0 fails=("\"")] ### (2)
\" [one] ### (3)
@@
_SystemLiteral <-
\' [one] ### (1)
_xWILD [min=0 max=0 fails=("'")] ### (2)
\' [one] ### (3)
@@
@@RULES
# Collapse runs of whitespace tokens into a single _whiteSpace node.
@RULES
_whiteSpace <-
_xWHITE [plus] ### (1)
@@
@@RULES
|
# Add num as numeric value to concept con's attribute called name.
# NOTE(review): usage snippet for the knowledge-base API; appends the
# value rather than replacing existing ones -- confirm against docs.
addnumval(L("con"), L("name"), L("num"));
# Internal rule-file parsing: gather statement nodes of a @POST region
# into a _POSTS node (rfaactions = reduce-for-analyzer actions).
@PATH _ROOT _POSTS _NLPPP
@POST
rfaactions(1)
single()
@RULES
#_POSTS [base] <- _ACTION [star] @@
_POSTS [base] <- _STMTS [plus] @@
|
@DECL
# Stub: placeholder phrase-lookup routine; currently only sets a dummy
# local and does nothing with the node.
LookUpPhrases(L("node")) {
L("this") = 1;
}
@@DECL
# Academic majors: a single capitalized word from the list below
# reduces to _major.
@NODES _LINE
@PRE
<1,1> cap()
@RULES
_major <- _xALPHA [s min=1 max=1 matches=(
EE
CS
EECS
ACCOUNTANCY
ACCOUNTING
ACOUSTICS
ADVERTISING
AERONAUTICS
AESTHETICS
AFRICOLOGY
AGRIBUSINESS
AGRONOMY
ANATOMY
ANTHROPOLOGY
ARCHAEOLOGY
ARCHITECTURE
ART
ASTRONAUTICS
ASTRONOMY
ASTROPHYSICS
AUDIOLOGY
AVIATION
BANKING
BIOCHEMISTRY
BIOENGINEERING
BIOLOGY
BIOPHYSICS
BOTANY
BUSINESS
CHEMISTRY
CHINESE
CLASSICS
CLIMATOLOGY
COMMUNICATIONS
COUNSELING
DEMOGRAPHY
DENTISTRY
DERMATOLOGY
DESIGN
DIETETICS
DRAMA
ECOLOGY
ECONOMETRICS
ECONOMICS
ELECTRONICS
ENGINEERING
ENGLISH
ENTOMOLOGY
EPIDEMIOLOGY
ETHICS
FASHION
FILM
FINANCE
FORESTRY
FRENCH
GENETICS
GEOCHEMISTRY
GEOGRAPHY
GEOLOGY
GEOPHYSICS
GEOSCIENCES
GERMAN
GERONTOLOGY
GOVERNMENT
GREEK
HEBREW
HISTORY
HORTICULTURE
HOSPITALITY
HUMANITIES
IMMUNOLOGY
INSURANCE
ITALIAN
JAPANESE
JOURNALISM
KINESIOLOGY
LANGUAGES
LATIN
LAW
LINGUISTICS
LITERATURE
LITHUANIAN
LOGISTICS
MANAGEMENT
MANUFACTURING
MARKETING
MATH
MATHEMATICS
MECHANICS
MEDIA
MEDICINE
METALS
METEOROLOGY
MICROBIOLOGY
MINERALOGY
MINING
MUSIC
NEMATOLOGY
NEUROSCIENCE
NUTRITION
OCEANOGRAPHY
OPERATIONS
OPTICS
OPTOMETRY
PATHOBIOLOGY
PATHOLOGY
PEDIATRICS
PEDAGOGY
PHARMACEUTICS
PHARMACOLOGY
PHARMACY
PHILOSOPHY
PHYSICS
PHYSIOLOGY
PORTUGUESE
PSYCHOLOGY
RECREATION
REHABILITATION
RELIGION
RUSSIAN
SCHOOL
SCIENCE
SLAVIC
SOCIOLOGY
SPANISH
STATISTICS
TAXATION
TEACHING
TELECOMMUNICATIONS
TELEMEDICINE
TESL
THEATRE
THEATER
THEOLOGY
TOURISM
WRITING
ZOOLOGY
)] @@
# Log np/vg/adjc triples within a clause to bodyAttr.txt, flagging
# noun phrases whose first child is itself an _np as COMPOUND.
@PATH _ROOT _TEXTZONE _sent _clause
@POST
L("child") = pndown(N(1));
if (pnname(L("child")) == "_np") {
"bodyAttr.txt" << "\nCOMPOUND\n";
}
"bodyAttr.txt" << N("$text", 1) << "\n" << N("$text", 2) << "\n" << N("$text", 3) << "\n---------------\n";
single();
@RULES
_xNIL <-
_np ### (1)
_vg
_adjc
@@
# Coordinate three conjoined companies into a single _company node,
# collecting each company's "normal" form into the S("conj") array.
@PATH _ROOT _paragraph _sentence
@POST
S("conj count") = 0;
S("conj")[S("conj count")++] = N("normal",1);
S("conj")[S("conj count")++] = N("normal",3);
S("conj")[S("conj count")++] = N("normal",5);
single();
@RULES
_company <-
_company [s] ### (1)
_conj [s plus] ### (2)
_company [s] ### (3)
_conj [s plus] ### (4)
_company [s] ### (5)
@@
# A line beginning with "Class" up to its newline is a column-header
# row (same rule as the file's earlier _columnHeaders pass).
@NODES _ROOT
@RULES
_columnHeaders <-
Class ### (1)
_xWILD [plus fails=(\n \r)] ### (2)
_xWILD [one matches=(\n \r)] ### (3)
@@
# (Stray export artifact: "Subsets and Splits" -- not NLP++ code; safe to remove.)