@NODES _equation @POST "debug.txt" << "addEquationToLabel 1\n"; if (!X("split")) { group(3,3,"_equation"); X("full") = N("label full",2); X("label") = N("label type",2); X("name") = N("label name",2); X("equation") = N("$text",3); addEquationToLabel(N("label name",2),N("$text",3)); } @RULES _xNIL <- _beginEq ### (1) _label ### (2) _xWILD ### (3) _endEq ### (4) @@ @POST "debug.txt" << "addEquationToLabel 2\n"; if (!X("split")) { group(3,3,"_equation"); X("equation") = N("$text",2); } @RULES _xNIL <- _beginEq ### (1) _xWILD ### (2) _endEq ### (3) @@
@NODES _ROOT @POST excise(1,1); noop(); @RULES _xNIL <- _xBLANK ### (1) @@
@NODES _LINE @PRE <1,1> cap(); @RULES _phoneWorkPhrase [] <- _xWILD [min=1 max=1 s layer=(_Work) match=(Office W Work)] \: [s] _xWHITE [star s] _phoneNumber [s] @@
@NODES _ROOT @POST S("text") = N("$text",3); S("type") = "text"; singler(3,3); @RULES _text <- _xWILD [matches=(_section _figure _ref _cite)] ### (1) _NEWLINE [s] ### (2) _xWILD [s plus fails=(_spacing _beginAbs _endAbs _figure _text _ref _cite _BLANKLINE _IGNORE _FOOTER _stopper _equation _equationInline _itemNum _bold _italics)] ### (3) _xWILD [match=(_spacing _beginAbs _endAbs _figure _text _abstract _ref _cite _BLANKLINE _IGNORE _FOOTER _stopper _equation _equationInline _itemNum _bold _italics) lookahead] ### (4) @@ @POST S("text") = N("$text",2); S("type") = "text"; singler(2,2); @RULES _text <- _xWILD [matches=(_section _figure _ref _cite)] ### (1) _xWILD [s plus fails=(_spacing _beginAbs _endAbs _figure _text _ref _cite _BLANKLINE _IGNORE _FOOTER _stopper _equation _equationInline _itemNum _bold _italics)] ### (2) _xWILD [match=(_spacing _beginAbs _endAbs _figure _text _abstract _ref _cite _BLANKLINE _IGNORE _FOOTER _stopper _equation _equationInline _itemNum _bold _italics) lookahead] ### (3) @@
@PATH _ROOT _td @PRE <2,2> uppercase(); @POST X("suffix",2) = strtolower(N("$text",2)); @RULES _xNIL <- \> _xALPHA \< @@
@NODES _ROOT # LINK TO SPECIAL RULE ELEMENTS: # http://visualtext.org/help/NLP_PP_Stuff/Special_rule_elements.htm @RULES _xNIL <- _xSTART ### (1) <==== REQUIRES THE NEXT NODE TO BE THE FIRST _xALPHA ### (2) _xNUM ### (3) _xWILD ### (4) _xWHITE ### (5) _xCAP ### (6) _xCAPLET ### (7) _xLET ### (8) _xEND ### (9) <==== REQUIRES THE PREVIOUS NODE TO BE THE LAST @@
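A minimal sketch of how _xSTART and _xEND anchor a rule; the _heading rule and the _LINE context below are hypothetical, not part of the original pass:

@NODES _LINE

# Fires only when a capitalized word plus a number spans the whole line,
# because _xSTART pins the match to the first node and _xEND to the last.
@RULES
_heading <-
    _xSTART     ### (1)
    _xCAP       ### (2)
    _xWHITE     ### (3)
    _xNUM       ### (4)
    _xEND       ### (5)
    @@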
@PATH _ROOT _paragraph _sentence @CHECK ## FIND LAST MATCHING OBJECT S("exit") = 0; N("anaphora") = 0; S("sentence object") = prev(X("object")); if (!N("action")) fail(); # LOOP BACK THROUGH SENTENCES while (!S("exit") && S("sentence object")) { N("object") = down(S("sentence object")); # LOOP THROUGH OBJECTS IN SENTENCE while (!S("exit") && N("object")) { if (strval(N("object"),"action") == N("action")) S("exit") = 1; else N("object") = next(N("object")); } S("sentence object") = prev(S("sentence object")); } if (!N("object")) { "anaphoraEvent.txt" << "Failed: " << phrasetext() << "\n"; fail(); } @POST N("anaphora") = N("object"); S("object") = N("object"); S("normal") = strval(N("object"),"normal"); "anaphoraEvent.txt" << "Anaphora: " << phrasetext() << "\n"; "anaphoraEvent.txt" << " from: " << X("$text") << "\n"; "anaphoraEvent.txt" << " Object: " << conceptname(S("object")) << "\n"; "anaphoraEvent.txt" << " Action: " << N("action") << "\n"; "anaphoraEvent.txt" << "\n"; single(); @RULES _eventAnaphora <- _anaphora [s] ### (1) @@
@NODES _ROOT # Suggested Element Modifiers # http://visualtext.org/help/NLP_PP_Stuff/Suggested_element_modifiers.htm # s / singlet : search below a matched node # base : makes the current node the bottommost of the search # unsealed : open up a base node to search underneath @RULES _stillMatches <- of [s] ### (1) @@
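A hedged illustration of the s (singlet) modifier; the rule below is hypothetical and only shows that an [s]-marked element may match a token that has already been reduced under another node:

@NODES _sentence

# "of" and the following _np can sit below previously built nodes;
# without [s] the matcher would only consider the immediate children.
@RULES
_ofPhrase <-
    of [s]           ### (1)
    _xWHITE [star s] ### (2)
    _np [s]          ### (3)
    @@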
@DECL ############## ## FN: XMLHEADER ## SUBJ: Print header tags for XML file. ## RET: None. ############## xmlheader( L("out") # Output stream. ) { L("out") << "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>" << "\n"; } ############## ## FN: XMLSTART ## SUBJ: Print a start XML tag. ## RET: None. ############## xmlstart( L("tag"), # Tag name to use. L("out") # Output stream. ) { L("out") << "<" << L("tag") << ">\n"; } ############## ## FN: XMLEND ## SUBJ: Print an end XML tag. ## RET: None. ############## xmlend( L("tag"), # Tag name to use. L("out") # Output stream. ) { L("out") << "</" << L("tag") << ">\n"; } ############## ## FN: XMLSHORT ## SUBJ: Print a one-liner XML tag. ## RET: None. ############## xmlshort( L("tag"), # Tag name to use. L("val"), # Value to output. L("out") # Output stream. ) { L("out") << "<" << L("tag") << ">" << L("val") << "</" << L("tag") << ">" << "\n"; } @@DECL
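A usage sketch for the XML helpers above, assuming a later @CODE pass and a hypothetical output file name:

@CODE
L("out") = "record.xml";             # hypothetical file; any output stream works
xmlheader(L("out"));
xmlstart("record",L("out"));
xmlshort("name","example",L("out"));
xmlend("record",L("out"));
@@CODE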
@PATH _ROOT _paragraph _sentence @RULES _departmentOf <- department _of @@ _agency <- bureau of alcohol \, [opt] tobacco and firearms @@
@NODES _ROOT #@CHECK #if (N("pattern",1) != "n") # fail(); @PRE <1,1> var("header"); @POST S("text") = N("$text",1); single(); @RULES _header <- _LINE ### (1) @@
@NODES _LINE @PRE <1,1> cap(); @RULES # Ex: Mount _cityMod <- _xWILD [min=1 max=1 s match=("Mount" "City" "Falls" "Port" "Springs" "River" "New" "Saint" "Valley" "Meadow" "Lake" "Forks" "Acres" "Beach" "Bay" "Hill" "Hills" "Park" "North" "East" "South" "West" "Rock" "La" "San" "Square" "Heights" "Green" "Summit" "Ridge" "Run" "Fort" "Bend" "Tree" "Brook" "Shore" "Shores" "Alto" "St" "Mt" "Gap" "Haven" "Dam" "Union" "Grove" "York" "Groves" "Plains" "Estates")] @@
############################################### # FILE: Numeric Lists.pat # # SUBJ: Recognized coordinate NPs built from # # numbers # # AUTH: Paul Deane # # CREATED: 01/Mar/01 # DATE OF THIS VERSION: 31/Aug/01 # # Copyright ############################################### @NODES _ROOT @RULES @POST S("Numeral Value")[0] = N("Numeral Value",1); S("MaxArrayPos") = 1; S("MaxValue") = N("MaxArrayPos",4)+1 ; S("CurrentValue") = 0 ; while ( S("MaxArrayPos") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",4)[S("CurrentValue")] ; S("MaxArrayPos")++; S("CurrentValue")++; } S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",4)[S("CurrentValue")] ; S("MaxArrayPos") = S("MaxArrayPos"); single(); @@POST @RULES _cardinalList <- _cardinalNumeral [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _cardinalList [one] @@ @@RULES @POST S("Numeral Value")[0] = num(N("$text",1)); S("MaxArrayPos") = 1; S("MaxValue") = N("MaxArrayPos",4) ; S("CurrentValue") = 0 ; while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value")[S("CurrentValue")] ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",4)[S("CurrentValue")] ; S("MaxArrayPos") = S("MaxArrayPos"); single(); @@POST @RULES _cardinalList <- _xNUM [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _cardinalList [one] @@ @@RULES @POST #add the sequence of values explicitly to the array for the first sequence S("Numeral Value")[0] = N("Numeral Value",1)[0]; S("MaxArrayPos") = 1; S("MinValue") = N("Numeral Value",1)[0] ; S("CurrentValue") = N("Numeral Value",1)[0] + 1 ; S("MaxValue") = N("Numeral Value",1)[1]; if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",1)[1] ; } else { S("Numeral Value")[1] = N("Numeral Value",1)[1]; S("MaxArrayPos") = 1; } S("FirstMax") = S("MaxArrayPos"); #add the sequence of values to the array for the list S("MaxArrayPos") = 0; S("MinValue") = 0 ; S("MaxValue") = N("MaxValue",4)+1; while ( S("MaxArrayPos") < S("MaxValue") ) { S("Numeral Value")[S("FirstMax") + S("MaxArrayPos")] = N("Numeral Value",4)[S("MaxArrayPos")] ; S("MaxArrayPos")++; } S("Numeral Value")[S("FirstMax") + S("MaxArrayPos")] = N("Numeral Value",4)[S("MaxArrayPos")] ; S("MaxArrayPos") = S("FirstMax") + S("MaxArrayPos"); single(); @@POST @RULES _cardinalList <- _cardinalSequence [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _cardinalList [one] @@ @@RULES @POST #add the sequence of values explicitly to the array for the first sequence S("Numeral Value")[0] = N("Numeral Value",1)[0]; S("MaxArrayPos") = 1; S("MinValue") = N("Numeral Value",1)[0] ; S("CurrentValue") = N("Numeral Value",1)[0] + 1 ; S("MaxValue") = N("Numeral Value",1)[1]; if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",1)[1] ; } else { S("Numeral Value")[1] = N("Numeral Value",1)[1]; S("MaxArrayPos") = 1; } S("MaxArrayPos") = S("MaxArrayPos")+1; #add the sequence of values explicitly to the array for the second sequence S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",6)[0]; S("MaxArrayPos")++; S("MinValue") = N("Numeral 
Value",6)[0] ; S("CurrentValue") = N("Numeral Value",6)[0] + 1 ; S("MaxValue") = N("Numeral Value",6)[1]; if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",6)[1] ; } else { S("Numeral Value")[2] = N("Numeral Value",6)[1]; S("MaxArrayPos") = 2; } S("MaxArrayPos") = S("MaxArrayPos")+1; single(); @@POST @RULES _cardinalList <- _cardinalSequence [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _cardinalSequence [one] @@ @@RULES @POST #add the lone numeral S("Numeral Value")[0] = N("Numeral Value",1); #add the sequence of values explicitly to the array S("Numeral Value")[1] = N("Numeral Value",6)[0]; S("MaxArrayPos") = 2; S("MinValue") = N("Numeral Value",6)[0] ; S("CurrentValue") = N("Numeral Value",6)[0] + 1 ; S("MaxValue") = N("Numeral Value",6)[1]; if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } } else { S("Numeral Value")[2] = N("Numeral Value",6)[1]; S("MaxArrayPos") = 2; } single(); @@POST @RULES _cardinalList <- _cardinalNumeral [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _cardinalSequence [one] @@ @@RULES @POST #add the lone numeral S("Numeral Value")[0] = num(N("$text",1)); #add the sequence of values explicitly to the array S("Numeral Value")[1] = N("Numeral Value",6)[0]; S("MaxArrayPos") = 2; S("MinValue") = N("Numeral Value",6)[0] ; S("CurrentValue") = N("Numeral Value",6)[0] + 1 ; S("MaxValue") = N("Numeral Value",6)[1]; if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } } else { S("Numeral Value")[2] = N("Numeral Value",6)[1]; S("MaxArrayPos") = 2; } S("MaxArrayPos") = S("MaxArrayPos")+1; single(); @@POST @RULES _cardinalList <- _xNUM [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _cardinalSequence [one] @@ @@RULES @POST S("Numeral Value")[0] = N("Numeral Value",1)[0]; S("MaxArrayPos") = 1; S("MinValue") = N("Numeral Value",1)[0] ; S("CurrentValue") = N("Numeral Value",1)[0] + 1 ; S("MaxValue") = N("Numeral Value",1)[1]; #add the sequence of values explicitly to the array if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } } else { S("Numeral Value")[1] = N("Numeral Value",1)[1]; S("MaxArrayPos") = 0; } #add the lone numeral S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",6); single(); @@POST @RULES _cardinalList <- _cardinalSequence [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _cardinalNumeral [one] @@ @@RULES @POST S("Numeral Value")[0] = N("Numeral Value",1)[0]; S("MaxArrayPos") = 1; S("MinValue") = N("Numeral Value",1)[0] ; S("CurrentValue") = N("Numeral Value",1)[0] + 1 ; S("MaxValue") = N("Numeral Value",1)[1]; #add the sequence of values explicitly to the array 
if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",1)[1] ; } else { S("Numeral Value")[1] = N("Numeral Value",1)[1]; S("MaxArrayPos") = 1; } #add the lone numeral S("Numeral Value")[S("MaxArrayPos")] = num(N("$text",6)); single(); @@POST @RULES _cardinalList <- _cardinalSequence [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xNUM [one] @@ @@RULES @POST S("Numeral Value")[0] = N("Numeral Value",1); S("Numeral Value")[1] = N("Numeral Value",6); S("MaxArrayPos") = 1; single(); @@POST @RULES _cardinalList <- _cardinalNumeral [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _cardinalNumeral [one] @@ @@RULES @POST S("Numeral Value")[0] = num(N("$text",1)); S("Numeral Value")[1] = num(N("$text",6)); S("MaxArrayPos") = 1; single(); @@POST @RULES _cardinalList <- _xNUM [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xNUM [one] @@ @@RULES @POST S("Numeral Value")[0] = N("Numeral Value",1); S("MaxArrayPos") = 1; S("MaxValue") = N("MaxArrayPos",4) ; S("CurrentValue") = 0 ; while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value")[S("CurrentValue")] ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value")[S("CurrentValue")] ; S("MaxArrayPos") = S("MaxArrayPos"); single(); @@POST @RULES _ordinalList <- _ordinalNumeral [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _ordinalList [one] @@ @@RULES @POST #add the sequence of values explicitly to the array for the first sequence S("Numeral Value")[0] = N("Numeral Value",1)[0]; S("MaxArrayPos") = 1; S("MinValue") = N("Numeral Value",1)[0] ; S("CurrentValue") = N("Numeral Value",1)[0] + 1 ; S("MaxValue") = N("Numeral Value",1)[1]; if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",1)[1] ; } else { S("Numeral Value")[1] = N("Numeral Value",1)[1]; S("MaxArrayPos") = 1; } S("FirstMax") = S("MaxArrayPos"); #add the sequence of values to the array for the list S("MaxArrayPos") = 0; S("MinValue") = 0 ; S("MaxValue") = N("MaxValue",4)+1; while ( S("MaxArrayPos") < S("MaxValue") ) { S("Numeral Value")[S("FirstMax") + S("MaxArrayPos")] = N("Numeral Value",4)[S("MaxArrayPos")] ; S("MaxArrayPos")++; } S("Numeral Value")[S("FirstMax") + S("MaxArrayPos")] = N("Numeral Value",4)[S("MaxArrayPos")] ; S("MaxArrayPos") = S("FirstMax") + S("MaxArrayPos"); single(); @@POST @RULES _ordinalList <- _ordinalSequence [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _ordinalList [one] @@ @@RULES @POST #add the sequence of values explicitly to the array for the first sequence S("Numeral Value")[0] = N("Numeral Value",1)[0]; S("MaxArrayPos") = 1; S("MinValue") = N("Numeral Value",1)[0] ; S("CurrentValue") = N("Numeral Value",1)[0] + 1 ; S("MaxValue") = N("Numeral Value",1)[1]; if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = 
S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",1)[1] ; } else { S("Numeral Value")[1] = N("Numeral Value",1)[1]; S("MaxArrayPos") = 1; } S("MaxArrayPos") = S("MaxArrayPos")+1; #add the sequence of values explicitly to the array for the second sequence S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",6)[0]; S("MaxArrayPos")++; S("MinValue") = N("Numeral Value",6)[0] ; S("CurrentValue") = N("Numeral Value",6)[0] + 1 ; S("MaxValue") = N("Numeral Value",6)[1]; if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",6)[1] ; } else { S("Numeral Value")[2] = N("Numeral Value",6)[1]; S("MaxArrayPos") = 2; } S("MaxArrayPos") = S("MaxArrayPos")+1; single(); @@POST @RULES _ordinalList <- _ordinalSequence [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _ordinalSequence [one] @@ @@RULES @POST #add the lone numeral S("Numeral Value")[0] = N("Numeral Value",1); #add the sequence of values explicitly to the array S("Numeral Value")[1] = N("Numeral Value",6)[0]; S("MaxArrayPos") = 2; S("MinValue") = N("Numeral Value",6)[0] ; S("CurrentValue") = N("Numeral Value",6)[0] + 1 ; S("MaxValue") = N("Numeral Value",6)[1]; if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } } else { S("Numeral Value")[2] = N("Numeral Value",6)[1]; S("MaxArrayPos") = 2; } S("MaxArrayPos") = S("MaxArrayPos")+1; single(); @@POST @RULES _ordinalList <- _ordinalNumeral [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _ordinalSequence [one] @@ @@RULES @POST S("Numeral Value")[0] = N("Numeral Value",1)[0]; S("MaxArrayPos") = 1; S("MinValue") = N("Numeral Value",1)[0] ; S("CurrentValue") = N("Numeral Value",1)[0] + 1 ; S("MaxValue") = N("Numeral Value",1)[1]; #add the sequence of values explicitly to the array if ( S("MinValue") < S("MaxValue")) { while ( S("MinValue") < S("MaxValue") ) { S("Numeral Value")[S("MaxArrayPos")] = S("CurrentValue") ; S("MaxArrayPos")++; S("MinValue")++; S("CurrentValue")++; } } else { S("Numeral Value")[1] = N("Numeral Value",1)[1]; S("MaxArrayPos") = 1; } #add the lone numeral S("Numeral Value")[S("MaxArrayPos")] = N("Numeral Value",6); single(); @@POST @RULES _ordinalList <- _ordinalSequence [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _ordinalNumeral [one] @@ @@RULES @POST S("Numeral Value")[0] = N("Numeral Value",1); S("Numeral Value")[1] = N("Numeral Value",6); S("MaxArrayPos") = 1; single(); @@POST @RULES _ordinalList <- _ordinalNumeral [one] \, [one] _xWILD [opt match=(_xWHITE "_whiteSpace")] _xWILD [one matches=("&" "and" "or")] _xWILD [opt match=(_xWHITE "_whiteSpace")] _ordinalNumeral [one] @@ @@RULES
@CODE L("hello") = 0; @@CODE #@PATH _ROOT _TEXTZONE _tok @NODES _tok # num . num @POST xrename("_num"); X("num") = N("$text",2) + "." + N("$text",4); @RULES _xNIL <- _xSTART _xNUM \. _xNUM _xEND @@ # num / num @POST xrename("_num"); X("num") = 1; X("fraction") = 1; @RULES _xNIL <- _xSTART _xNUM \/ _xNUM _xEND @@ # Note: Can redo pos in later pass for street numbers and # proper nouns. @POST xrename("_num"); X("num") = 1; # singular. X("ordinal") = 1; chpos(X(),"JJ"); # Conform Treebank. @RULES _xNIL <- _xSTART _xWILD [plus fail=(st nd rd th)] _xWILD [one match=(st nd rd th)] _xEND @@ # cap & cap # cap / cap @POST xrename("_noun"); chpos(X(),"NP"); X("cap") = 1; @RULES _xNIL <- _xSTART _xCAP _xWILD [one match=( \& \/ )] _xCAP _xWILD [star match=(_xPUNCT _xCAP)] _xEND @@ # - num # Could be suffix of alpha-num type noun. @POST xrename("_num"); X("num") = N("$text",3); @RULES _xNIL <- _xSTART \- _xNUM _xEND @@ # Note: some misparsed treebank tokens. @POST if (strisupper(N("$text",9))) xrename("_noun"); else xrename("_adj"); X("sem") = "us_state"; X("cap") = 1; @RULES _xNIL <- _xSTART _xCAP \. _xCAP [opt] \. [opt] _xCAP [opt] \. [opt] \- _xALPHA _xEND @@ # US $ @POST xrename("_money"); @RULES _xNIL <- _xSTART US \$ _xEND @@ # `` # '' @POST ++G("dbl quotes"); xrename("_dblquote"); X("nopos") = 1; @RULES _xNIL <- \` \` @@ _xNIL <- \' \' @@ # Mishandled by pretagged. @POST xrename("_modal"); chpos(X(),"VBP"); X("neg") = 1; @RULES _xNIL <- _xSTART _xWILD [one match=( can doesn don mayn mightn mustn shan won )] \' t _xEND @@ @POST xrename("_modal"); chpos(X(),"VBD"); X("neg") = 1; @RULES _xNIL <- _xSTART _xWILD [one match=( couldn didn shouldn wouldn )] \' t _xEND @@ # Pre-tagged artifact. # n't # n ' t @POST xrename("_adv"); chpos(X(),"RB"); X("neg") = 1; X("tok") = "n't"; @RULES _xNIL <- n [s] \' [s] t [s] @@ # Some abbreviations. @PRE <2,2> cap(); # 10/14/06 AM. @POST xrename("_noun"); chpos(X(),"NP"); X("abbrev") = 1; X("cap") = 1; # 5/25/06 AM. single(); @RULES _month <- _xSTART _xWILD [one match=(Jan Feb Mar Apr May Jun Jul Aug Sep Sept Oct Nov Dec)] \. [opt] _xEND @@ _title <- _xSTART _xWILD [one match=(mr mrs dr messrs sen rep)] \. _xEND @@ _abbr <- _xSTART _xWILD [one match=(jr st)] \. _xEND @@ _companyDESIG <- _xSTART _xWILD [one match=(co corp inc ltd llc bancorp)] \. [opt] _xEND @@ #n.v. _companyDESIG <- # 17 _xSTART n \. [opt] v \. [opt] _xEND @@ @POST xrename("_noun"); chpos(X(),"NN"); X("abbrev") = 1; X("cap") = 1; single(); @RULES _noabbr <- _xSTART _xWILD [one match=( No )] \. [opt] _xEND @@ # Some common contractions. # 're @POST X("stem") = X("sem") = "be"; X("number") = 1; # X("-ed") = 1; group(2,3,"_be"); xrename("_verb"); chpos(X(),"VBP"); @RULES _xNIL <- _xSTART \' re _xEND @@ # letter . # alpha . @PRE <2,2> length(1); @POST xrename("_letabbr"); X("cap") = 1; # 04/21/07 AM. @RULES _xNIL <- # 20 _xSTART _xALPHA \. _xEND @@ # alpha * @POST # xrename("_noun"); # pncopyvars(N(2),X()); X("text") = N("$text",2); X("punct end") = "*"; excise(3,3); # if (strisupper(X("text"))) # chpos(X(),"NP"); @RULES _xNIL <- _xSTART _xALPHA \* _xEND @@ # num s @POST xrename("_noun"); chpos(X(),"NNS"); # Treebank 5/2 NNS/CD. 
X("id") = "tok50 num-s"; @RULES _xNIL <- _xSTART _xNUM s _xEND @@ # num , num , num @PRE <4,4> length(3); <6,6> length(3); @POST xrename("_num"); chpos(X(),"CD"); X("id") = "tok50 num,num,num"; @RULES _xNIL <- _xSTART _xNUM \, _xNUM \, _xNUM _xEND @@ # num , num @PRE <4,4> length(3); @POST xrename("_num"); chpos(X(),"CD"); X("id") = "tok50 num,num"; X("number") = "plural"; @RULES _xNIL <- _xSTART _xNUM \, _xNUM _xEND @@ # US States. @POST xrename("_noun"); X("sem") = "us_state"; chpos(X(),"NP"); if (N(4)) X("pos eos") = 1; X("id") = "tok50 us-state"; X("cap") = 1; single(); @RULES _usstate <- n \. _xWILD [one match=(c d h j m y)] \. [opt] @@ @POST xrename("_noun"); X("sem") = "us_state"; chpos(X(),"NP"); if (N(4)) X("pos eos") = 1; X("id") = "tok50 us-state"; X("cap") = 1; single(); @RULES _usstate <- s \. _xWILD [one match=(c d)] \. [opt] @@ # US States @POST xrename("_noun"); X("sem") = "us_state"; chpos(X(),"NP"); if (N(2)) X("pos eos") = 1; X("id") = "tok50 us-state"; single(); @RULES _usstate <- _xSTART _xWILD [one match=( AA AE AK AL Ala Alab Alabama Alas Alaska AP AR Ari Ariz Arizona Ark Arkan Arkansas AS AZ CA Cal Calif California CO Col Colo Colorado Columbia Conn Connecticut CT Dak Dakota DC DE Del Dela Delaware FL Fla # 09/07/06 AM. Flo Flor Florida FM GA Geo Georg Georgia GU Guam Haw Hawaii HI IA ID Ida Idaho IL Ill Illin Illinois IN Ind Indiana Iowa Kan Kans Kansas Ken Kent Kentucky KS KY LA Louis Louisiana MA Maine Mary Maryland Mass Massachusetts MD ME MH MI Mich Michigan Micronesia Minn Minnesota Miss Mississippi Missouri MN MO Mon Mont Montana MP MS MT NC ND NE Neb Nebraska Nev Nevada NH NJ NM NSW NV NY OH Ohio OK Okl Okla Oklahoma OR Ore Oreg Oregon Orpalau PA Penn Pennsyl Pennsylvania PR PW RI Samoa SC SD Tenn Tennessee Tex Texas TN TX UT Utah VA Ver Verm Vermont #VI Vir Virg Virgin Virginia VT WA Wash Washington WI Wis Wisc Wiscon Wisconsin WV WY Wyo Wyoming )] \. [opt] _xEND @@ # Note: Some abbreviations. @CHECK L("t") = strtolower(N("$text",2)); if (spellword(L("t"))) fail(); @POST xrename("_noun"); X("cap") = 1; # 04/21/07 AM. @RULES _xNIL <- # 20 _xSTART _xCAP \. _xEND @@ # Put some semantics on. # @POST if (G("conform treebank")) { chpos(X(),"NN"); N("bracket") = 1; } N("sem") = "date"; N("advl") = 1; # Possible standalone adverbial. L("xx") = pnparent(X()); # 07/10/12 AM. # Some temporal evidence. L("t") = strtolower(N("$text")); if (L("t") == "today") pnreplaceval(L("xx"),"date=present",1); # 07/10/12 AM. else if (L("t") = "yesterday") pnreplaceval(L("xx"),"date=past",1); # 07/10/12 AM. else pnreplaceval(L("xx"),"date=future",1); # 07/10/12 AM. # General date references. pnreplaceval(L("xx"),"date ref",1); # 07/10/12 AM. @RULES _xNIL <- _xWILD [s one match=( today yesterday tomorrow )] @@ @POST N("sem") = "date"; L("xx") = pnparent(X()); # 07/10/12 AM. pnreplaceval(L("xx"),"date ref",1); # 07/10/12 AM. @RULES _xNIL <- _xWILD [s one match=( millennium millennia millenniums century centuries decade decades year years month months week weeks day days evening evenings night nights morning mornings noon noons afternoon afternoons hour hours minute minutes second seconds time date season # 01/12/05 AM. holiday holidays quarter quarters # ambig, of course. )] @@
@NODES _ROOT @POST if (num(N("words")) && num(N("words")) == num(N("caps"))) { L("phrase") = strtrim(strsubst(strtolower(N("$text",1)),":",0)); "output.txt" << L("phrase") << "\n"; L("con") = getconcept(G("heads"),L("phrase")); IncrementCount(L("con"),"count"); addstrval(L("con"),"file",G("$inputname")); single(); } @RULES _header <- _LINE ### (1) @@
@DECL ############################################### # General functions ############################################### AddUniqueCon(L("concept"),L("name")) { L("con") = findconcept(L("concept"),L("name")); if (!L("con")) L("con") = makeconcept(L("concept"),L("name")); return L("con"); } AddUniqueStr(L("concept"),L("attr"),L("value")) { if (L("value")) { L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("str") = getstrval(L("val")); if (L("str") == L("value")) return 0; L("val") = nextval(L("val")); } addstrval(L("concept"),L("attr"),L("value")); return 1; } return 0; } AddUniqueNum(L("concept"),L("attr"),L("value")) { if (L("value")) { L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("num") = getnumval(L("val")); if (L("num") == L("value")) return 0; L("val") = nextval(L("val")); } addnumval(L("concept"),L("attr"),L("value")); return 1; } return 0; } AddUniqueConVal(L("concept"),L("attr"),L("value")) { "unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n"; L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("con") = getconval(L("val")); "unique.txt" << conceptname(L("con")) << "\n"; if (conceptpath(L("con")) == conceptpath(L("value"))) return 0; L("val") = nextval(L("val")); } addconval(L("concept"),L("attr"),L("value")); return 1; } PathToConcept(L("parent"),L("hier")) { L("cons") = split(L("hier")," "); L("i") = 0; L("con") = L("parent"); while (L("cons")[L("i")]) { L("c") = L("cons")[L("i")]; L("name") = strsubst(L("c"),"\"",0); if (L("name") != "concept") L("con") = AddUniqueCon(L("con"),L("name")); L("i")++; } return L("con"); } CopyAttr(L("from"),L("to"),L("attr")) { L("from value") = strval(L("from"),L("attr")); if (L("from value")) { L("to value") = strval(L("to"),L("attr")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr"),L("from value")); } } CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) { L("from value") = strval(L("from"),L("attr from")); if (L("from value")) { L("to value") = strval(L("to"),L("attr to")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr to"),L("from value")); } } CopyConAttr(L("from"),L("to"),L("attr")) { L("from value") = conval(L("from"),L("attr")); if (L("from value")) { L("to value") = conval(L("to"),L("attr")); if (L("from value") && !L("to value")) addconval(L("to"),L("attr"),L("from value")); } } AttrValues(L("con"),L("attr")) { L("at") = findattr(L("con"),L("attr")); if (L("at")) return attrvals(L("at")); return 0; } ValCount(L("vals")) { while (L("vals")) { L("count")++; L("vals") = nextval(L("vals")); } return L("count"); } LastChild(L("parent")) { L("child") = down(L("parent")); while (L("child")) { L("last") = L("child"); L("child") = next(L("child")); } return L("last"); } MakeCountCon(L("con"),L("count name")) { L("count name") = CountName(L("con"),L("count name")); return makeconcept(L("con"),L("count name")); } IncrementCount(L("con"),L("countname")) { L("count") = numval(L("con"),L("countname")); if (L("count")) { L("count") = L("count") + 1; replaceval(L("con"),L("countname"),L("count")); } else { addnumval(L("con"),L("countname"),1); L("count") = 1; } return L("count"); } CountName(L("con"),L("root")) { L("count") = IncrementCount(L("con"),L("root")); return L("root") + str(L("count")); } StripEndDigits(L("name")) { if (strisdigit(L("name"))) return 0; L("len") = strlength(L("name")) - 1; L("i") = L("len") - 1; L("str") = strpiece(L("name"),L("i"),L("len")); while 
(strisdigit(L("str")) && L("i")) { L("i")--; L("str") = strpiece(L("name"),L("i"),L("len")); } return strpiece(L("name"),0,L("i")); } ############################################### # KB Dump Functins ############################################### DumpKB(L("con"),L("file")) { L("dir") = G("$apppath") + "/kb/"; L("filename") = L("dir") + L("file") + ".kb"; if (!kbdumptree(L("con"),L("filename"))) { "kb.txt" << "FAILED dump: " << L("filename") << "\n"; } else { "kb.txt" << "DUMPED: " << L("filename") << "\n"; } } TakeKB(L("filename")) { L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb"; "kb.txt" << "Taking: " << L("path") << "\n"; if (take(L("path"))) { "kb.txt" << " Taken successfully: " << L("path") << "\n"; } else { "kb.txt" << " Taken FAILED: " << L("path") << "\n"; } } ChildCount(L("con")) { L("count") = 0; L("child") = down(L("con")); while (L("child")) { L("count")++; L("child") = next(L("child")); } return L("count"); } ############################################### # KBB DISPLAY FUNCTIONS ############################################### ############################################### # display type: # 0 compact with ellipses on long attr values # 1 full, more spread out # 2 compact without ellipses on long attr values ############################################### SaveKB(L("file"),L("top con"),L("display type")) { DisplayKBRecurse(L("file"),L("top con"),0,L("display type")); L("file") << "\n"; return L("top con"); } DisplayKB(L("top con"),L("display type")) { L("file") = DisplayFileName(); DisplayKBRecurse(L("file"),L("top con"),0,L("display type")); L("file") << "\n"; return L("top con"); } KBHeader(L("text")) { L("file") = DisplayFileName(); L("file") << "#######################\n"; L("file") << "# " << L("text") << "\n"; L("file") << "#######################\n\n"; } DisplayFileName() { if (num(G("$passnum")) < 10) { L("file") = "ana00" + str(G("$passnum")); }else if (num(G("$passnum")) < 100) { L("file") = "ana0" + str(G("$passnum")); } else { L("file") = "ana" + str(G("$passnum")); } L("file") = L("file") + ".kbb"; return L("file"); } DisplayKBRecurse(L("file"),L("parent"),L("level"),L("display type")) { if (L("level") == 0) { L("file") << conceptname(L("parent")) << "\n"; } L("con") = down(L("parent")); while (L("con")) { L("file") << SpacesStr(L("level")+1) << conceptname(L("con")); DisplayAttributes(L("file"),L("con"),L("display type"),L("level")); L("file") << "\n"; if (down(L("con"))) { L("lev") = 1; DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("display type")); } L("con") = next(L("con")); } } DisplayAttributes(L("file"),L("con"),L("display type"),L("level")) { L("attrs") = findattrs(L("con")); if (L("attrs")) L("file") << ": "; if (L("display type") == 1 && L("attrs")) L("file") << "\n"; L("first attr") = 1; while (L("attrs")) { L("vals") = attrvals(L("attrs")); L("count") = ValCount(L("vals")); if (L("display type") != 1 && !L("first attr")) { L("file") << ", "; } if (L("display type") == 1) { if (!L("first attr")) L("file") << "\n"; L("file") << SpacesStr(L("level")+2); } L("name") = attrname(L("attrs")); L("file") << QuoteIfNeeded(L("name")) << "="; L("first") = 1; L("type") = attrtype(L("con"),L("name")); while (L("vals")) { if (!L("first")) L("file") << ","; else if (L("count") > 1) L("file") << "["; if (L("type") == 1) { L("num") = getnumval(L("vals")); L("file") << str(L("num")); } else if (L("type") == 2) { if (L("first")) L("file") << "["; L("c") = getconval(L("vals")); L("file") << conceptpath(L("c")); } else if (L("type") == 3) { 
L("flt") = getfltval(L("vals")); L("file") << str(L("flt")); } else { L("val") = getstrval(L("vals")); if (L("display type") == 0 && strlength(L("val")) > 20) { L("shorty") = strpiece(L("val"),0,20); L("val") = L("shorty") + "..."; } L("file") << QuoteIfNeeded(str(L("val"))); } L("first") = 0; L("vals") = nextval(L("vals")); } if (L("type") == 2 || L("count") > 1) L("file") << "]"; L("first attr") = 0; L("attrs") = nextattr(L("attrs")); } } QuoteIfNeeded(L("str")) { if (!L("str")) return 0; L("new") = L("str"); if (strcontains(" ",L("str")) || strcontains("[",L("str")) || strcontains("]",L("str"))) L("new") = "\"" + L("new") + "\""; return L("new"); } # Because NLP++ doesn't allow for empty strings, # this function can only be called with "num" >= 1 SpacesStr(L("num")) { L("n") = 1; L("spaces") = " "; while (L("n") < L("num")) { L("spaces") = L("spaces") + " "; L("n")++; } return L("spaces"); } PadStr(L("num str"),L("pad str"),L("pad len")) { L("len") = strlength(L("num str")); L("pad") = 0; L("to pad") = L("pad len") - L("len"); while (L("i")++ < L("to pad")) { L("pad") = L("pad") + L("pad str"); } L("padded") = L("pad") + L("num str"); return L("padded"); } ############################################### # DICTIONARY FUNCTIONS ############################################### DictionaryStart() { G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb"; G("attrs") = openfile(G("attrs path")); } DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) { addword(L("word")); addword(L("attrName")); G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n"; G("attrs") << findwordpath(L("attrName")) << "\n"; if (L("attrType") == "str") G("attrs") << "pst\n" << "\"" << L("value") << "\""; else if (L("attrType") == "num") G("attrs") << "pnum\n" << str(L("value")); else if (L("attrType") == "con") G("attrs") << "pcon\n" << conceptpath(L("value")); G("attrs") << "\nend ind\n\n"; } DictionaryEnd() { G("attrs") << "\nquit\n\n"; closefile(G("attrs")); } @@DECL
@CODE DisplayKB(G("RadLex"), 1); @@CODE
@NODES _StartingTag @RULES _Attribute <- _xWILD [s one matches=("_xALPHA" "_" ":")] ### (1) _xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (2) _whiteSpace [opt] ### (3) \= [one] ### (4) _whiteSpace [opt] ### (5) _PubidLiteral [one] ### (6) @@ _Attribute <- _xWILD [s one matches=("_xALPHA" "_" ":")] ### (1) _xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (2) _whiteSpace [opt] ### (3) \= [one] ### (4) _whiteSpace [opt] ### (5) _SystemLiteral [one] ### (6) @@ @@RULES
@NODES _LINE @RULES _honors <- graduated [s opt] _xWHITE [s opt] with [s opt] _xWHITE [s opt] _xWILD [s t min=1 max=1 matches=( honors )] @@ _honors <- magna [s] _xWHITE [s] cum [s] _xWHITE [s] laude [s] @@ _honors <- summa [s] _xWHITE [s] cum [s] _xWHITE [s] laude [s] @@ _honors <- cum [s] _xWHITE [s] laude [s] @@ _honors <- dean [s] \' [s] s [s] _xWHITE [s] list [s] @@ _honors <- graduate [s opt] _xWHITE [s opt] fellowship [s t] @@ _honors <- _xWILD [s min=1 max=1 matches=( valedictorian salutatorian )] @@
# Check if a string is terminated by the given substring @POST if (!strendswith(N("$text",1),"ing")) fail(); @RULES _ving <- _verb @@
@CODE DisplayKB(G("dict"),0); L("con") = down(G("dict")); L("vals") = AttrValues(L("con"),"pos"); L("last") = 0; while (L("con")) { L("word") = conceptname(L("con")); L("letter") = strpiece(L("word"),0,0); if (!L("last") || L("letter") != L("last")) { L("filename") = str(DictionaryFilename(L("word"),"pos")); "filename.txt" << L("filename") << "\n"; L("output") = openfile(L("filename"),"app"); L("last") = L("letter"); } if (L("vals")) { L("output") << L("word"); while (L("vals")) { L("output") << " " << getsval(L("vals")); L("vals") = nextval(L("vals")); } } L("con") = next(L("con")); if (L("con")) { L("vals") = AttrValues(L("con"),"pos"); if (L("vals")) L("output") << "\n"; } } @@CODE
@PATH _ROOT _attr _attrConcept _LINE @POST X("attribute",2) = N("word",1); @RULES _xNIL <- _string _xEND @@
# Replace named attribute's value(s) with str. replaceval(L("con"), L("name"), L("str"));
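A brief hedged example of replaceval in a @CODE region; the concept and attribute names are made up for illustration. Unlike addstrval, replaceval overwrites the attribute's existing values rather than appending another one:

@CODE
L("con") = getconcept(findroot(),"example");   # hypothetical concept
addstrval(L("con"),"status","draft");
replaceval(L("con"),"status","final");         # "draft" is replaced, not appended
@@CODE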
# Match _det _quan _adj _noun nodes, starting with _noun. Reduce to _np @RULES _np <- _det _quan _adj _noun [trigger] @@
@CODE G("format") = getconcept(findroot(),"format"); G("caps") = getconcept(findroot(),"caps"); G("names") = getconcept(findroot(),"names"); @@CODE
# Check if the entire string is uppercase; if it is, fail so the all-caps token is not treated as a name @CHECK if (striscaps(N("$text",1))) fail(); @RULES _name <- will @@
@NODES _ROOT @RULES _xNIL <- the ### (1) @@
@NODES _cities @POST L("cities") = getconcept(G("state"),"cities"); L("city") = makeconcept(L("cities"),N("$text",1)); L("num") = strsubst(N("$text",3),",",0); addnumval(L("city"),"population",num(L("num"))); G("cities") << strtolower(N("$text",1)) << " s=city state=" << QuoteIfNeeded(conceptname(G("state"))) << "\n"; single(); @RULES _city <- _xWILD [plus match=(_xALPHA)] ### (1) \( ### (2) _xWILD [plus fail=(\))] ### (3) \) ### (4) @@
@NODES _ROOT @POST L("text") = conceptname(pnvar(N(1), "code")); if (strcontains("-", L("text"))) { L("range") = split(L("text"), "-"); S("class_start") = L("range")[0]; L("class_end") = L("range")[1]; L("class_end") = split(L("class_end"), "."); S("class_end") = L("class_end")[0]; makeconcept(pnvar(N(1), "code")); merge(); } noop(); @RULES _class <- _entry ### (1) @@
@NODES _LINE @POST singler(2,2); @RULES _item <- _xSTART ### (1) _xWILD [fail=(_comma)] ### (2) @@ @POST singler(2,2); @RULES _item <- _comma ### (1) _xWILD [plus fail=(_item _comma)] ### (2) @@
@DECL ############################################### # General functions ############################################### AddUniqueCon(L("concept"),L("name")) { L("con") = findconcept(L("concept"),L("name")); if (!L("con")) L("con") = makeconcept(L("concept"),L("name")); return L("con"); } AddUniqueStr(L("concept"),L("attr"),L("value")) { if (L("value")) { L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("str") = getstrval(L("val")); if (L("str") == L("value")) return 0; L("val") = nextval(L("val")); } addstrval(L("concept"),L("attr"),L("value")); return 1; } return 0; } AddUniqueNum(L("concept"),L("attr"),L("value")) { if (L("value")) { L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("num") = getnumval(L("val")); if (L("num") == L("value")) return 0; L("val") = nextval(L("val")); } addnumval(L("concept"),L("attr"),L("value")); return 1; } return 0; } AddUniqueConVal(L("concept"),L("attr"),L("value")) { "unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n"; L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("con") = getconval(L("val")); "unique.txt" << conceptname(L("con")) << "\n"; if (conceptpath(L("con")) == conceptpath(L("value"))) return 0; L("val") = nextval(L("val")); } addconval(L("concept"),L("attr"),L("value")); return 1; } PathToConcept(L("parent"),L("hier")) { L("cons") = split(L("hier")," "); L("i") = 0; L("con") = L("parent"); while (L("cons")[L("i")]) { L("c") = L("cons")[L("i")]; L("name") = strsubst(L("c"),"\"",0); if (L("name") != "concept") L("con") = AddUniqueCon(L("con"),L("name")); L("i")++; } return L("con"); } CopyAttr(L("from"),L("to"),L("attr")) { L("from value") = strval(L("from"),L("attr")); if (L("from value")) { L("to value") = strval(L("to"),L("attr")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr"),L("from value")); } } CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) { L("from value") = strval(L("from"),L("attr from")); if (L("from value")) { L("to value") = strval(L("to"),L("attr to")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr to"),L("from value")); } } CopyConAttr(L("from"),L("to"),L("attr")) { L("from value") = conval(L("from"),L("attr")); if (L("from value")) { L("to value") = conval(L("to"),L("attr")); if (L("from value") && !L("to value")) addconval(L("to"),L("attr"),L("from value")); } } AttrValues(L("con"),L("attr")) { L("at") = findattr(L("con"),L("attr")); if (L("at")) return attrvals(L("at")); return 0; } ValCount(L("vals")) { while (L("vals")) { L("count")++; L("vals") = nextval(L("vals")); } return L("count"); } LastChild(L("parent")) { L("child") = down(L("parent")); while (L("child")) { L("last") = L("child"); L("child") = next(L("child")); } return L("last"); } MakeCountCon(L("con"),L("count name")) { L("count name") = CountName(L("con"),L("count name")); return makeconcept(L("con"),L("count name")); } IncrementCount(L("con"),L("countname")) { L("count") = numval(L("con"),L("countname")); if (L("count")) { L("count") = L("count") + 1; replaceval(L("con"),L("countname"),L("count")); } else { addnumval(L("con"),L("countname"),1); L("count") = 1; } return L("count"); } CountName(L("con"),L("root")) { L("count") = IncrementCount(L("con"),L("root")); return L("root") + str(L("count")); } StripEndDigits(L("name")) { if (strisdigit(L("name"))) return 0; L("len") = strlength(L("name")) - 1; L("i") = L("len") - 1; L("str") = strpiece(L("name"),L("i"),L("len")); while 
(strisdigit(L("str")) && L("i")) { L("i")--; L("str") = strpiece(L("name"),L("i"),L("len")); } return strpiece(L("name"),0,L("i")); } ############################################### # KB Dump Functins ############################################### DumpKB(L("con"),L("file")) { L("dir") = G("$apppath") + "/kb/"; L("filename") = L("dir") + L("file") + ".kb"; if (!kbdumptree(L("con"),L("filename"))) { "kb.txt" << "FAILED dump: " << L("filename") << "\n"; } else { "kb.txt" << "DUMPED: " << L("filename") << "\n"; } } TakeKB(L("filename")) { L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb"; "kb.txt" << "Taking: " << L("path") << "\n"; if (take(L("path"))) { "kb.txt" << " Taken successfully: " << L("path") << "\n"; } else { "kb.txt" << " Taken FAILED: " << L("path") << "\n"; } } ChildCount(L("con")) { L("count") = 0; L("child") = down(L("con")); while (L("child")) { L("count")++; L("child") = next(L("child")); } return L("count"); } ############################################### # KBB DISPLAY FUNCTIONS ############################################### ############################################### # display type: # 0 compact with ellipses on long attr values # 1 full, more spread out # 2 compact without ellipses on long attr values ############################################### # con: root concept to save # name: file path without kbb SaveToKB(L("con"),L("name")) { L("filepath") = G("$kbpath") + L("name") + ".kbb"; L("file") = openfile(L("filepath")); SaveKB(L("file"),L("con"),2); closefile(L("file")); } SaveKB(L("file"),L("top con"),L("display type")) { DisplayKBRecurse(L("file"),L("top con"),0,L("display type")); L("file") << "\n"; return L("top con"); } DisplayKB(L("top con"),L("display type")) { L("file") = DisplayFileName(); DisplayKBRecurse(L("file"),L("top con"),0,L("display type")); L("file") << "\n"; return L("top con"); } KBHeader(L("text")) { L("file") = DisplayFileName(); L("file") << "#######################\n"; L("file") << "# " << L("text") << "\n"; L("file") << "#######################\n\n"; } DisplayFileName() { if (num(G("$passnum")) < 10) { L("file") = "ana00" + str(G("$passnum")); }else if (num(G("$passnum")) < 100) { L("file") = "ana0" + str(G("$passnum")); } else { L("file") = "ana" + str(G("$passnum")); } L("file") = L("file") + ".kbb"; return L("file"); } DisplayKBRecurse(L("file"),L("parent"),L("level"),L("display type")) { if (L("level") == 0) { L("file") << conceptname(L("parent")) << "\n"; } L("con") = down(L("parent")); while (L("con")) { L("file") << SpacesStr(L("level")+1) << conceptname(L("con")); DisplayAttributes(L("file"),L("con"),L("display type"),L("level")); L("file") << "\n"; if (down(L("con"))) { L("lev") = 1; DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("display type")); } L("con") = next(L("con")); } } DisplayAttributes(L("file"),L("con"),L("display type"),L("level")) { L("attrs") = findattrs(L("con")); if (L("attrs")) L("file") << ": "; if (L("display type") == 1 && L("attrs")) L("file") << "\n"; L("first attr") = 1; while (L("attrs")) { L("vals") = attrvals(L("attrs")); L("count") = ValCount(L("vals")); if (L("display type") != 1 && !L("first attr")) { L("file") << ", "; } if (L("display type") == 1) { if (!L("first attr")) L("file") << "\n"; L("file") << SpacesStr(L("level")+2); } L("name") = attrname(L("attrs")); L("file") << QuoteIfNeeded(L("name")) << "="; L("first") = 1; L("type") = attrtype(L("con"),L("name")); while (L("vals")) { if (!L("first")) L("file") << ","; else if (L("type") != 2 && L("count") > 1) L("file") << 
"["; if (L("type") == 1) { L("num") = getnumval(L("vals")); L("file") << str(L("num")); } else if (L("type") == 2) { if (L("first")) L("file") << "["; L("c") = getconval(L("vals")); L("file") << conceptpath(L("c")); } else if (L("type") == 3) { L("flt") = getfltval(L("vals")); L("file") << str(L("flt")); } else { L("val") = getstrval(L("vals")); if (L("display type") == 0 && strlength(L("val")) > 20) { L("shorty") = strpiece(L("val"),0,20); L("val") = L("shorty") + "..."; } L("file") << QuoteIfNeeded(str(L("val"))); } L("first") = 0; L("vals") = nextval(L("vals")); } if (L("type") == 2 || L("count") > 1) L("file") << "]"; L("first attr") = 0; L("attrs") = nextattr(L("attrs")); } } QuoteIfNeeded(L("str")) { if (!L("str")) return 0; L("new") = L("str"); if (strcontains("-",L("str")) || strcontains(" ",L("str")) || strcontains("[",L("str")) || strcontains("]",L("str"))) L("new") = "\"" + L("new") + "\""; return L("new"); } # Because NLP++ doesn't allow for empty strings, # this function can only be called with "num" >= 1 SpacesStr(L("num")) { L("n") = 1; L("spaces") = " "; while (L("n") < L("num")) { L("spaces") = L("spaces") + " "; L("n")++; } return L("spaces"); } PadStr(L("num str"),L("pad str"),L("pad len")) { L("len") = strlength(L("num str")); L("pad") = 0; L("to pad") = L("pad len") - L("len"); while (L("i")++ < L("to pad")) { L("pad") = L("pad") + L("pad str"); } L("padded") = L("pad") + L("num str"); return L("padded"); } ############################################### # DICTIONARY FUNCTIONS ############################################### DictionaryStart() { G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb"; G("attrs") = openfile(G("attrs path")); } DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) { addword(L("word")); addword(L("attrName")); G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n"; G("attrs") << findwordpath(L("attrName")) << "\n"; if (L("attrType") == "str") G("attrs") << "pst\n" << "\"" << L("value") << "\""; else if (L("attrType") == "num") G("attrs") << "pnum\n" << str(L("value")); else if (L("attrType") == "con") G("attrs") << "pcon\n" << conceptpath(L("value")); G("attrs") << "\nend ind\n\n"; } DictionaryEnd() { G("attrs") << "\nquit\n\n"; closefile(G("attrs")); } OrderByCount(L("words"),L("order")) { L("done") = 0; L("sanity") = 0; while (!L("done")) { L("done") = 1; L("conmax") = 0; L("max") = 0; L("word") = down(L("words")); while (L("word")) { L("check") = numval(L("word"),"checked"); if (!L("check")) { L("done") = 0; L("count") = numval(L("word"),"count"); if (L("count") > L("max")) { "max.txt" << conceptname(L("word")) << " " << L("count") << "\n"; L("max") = L("count"); L("conmax") = L("word"); } } L("word") = next(L("word")); } if (!L("done") && L("conmax")) { L("word") = conceptname(L("conmax")); L("con") = makeconcept(L("order"),L("word")); if (!spellword(L("word"))) { addnumval(L("con"),"unknown",1); } addnumval(L("con"),"count",L("max")); addnumval(L("conmax"),"checked",1); } if (L("safety")++ > 300) { L("done") = 1; } } } @@DECL
@PATH _ROOT _educationZone # Mark the start of subzone in parse tree. @CHECK #Ngt(1,"instance",0) if (N("instance",1) <= 0) fail(); @RULES _eduStart <- _LINE [s] @@
@CODE L("filename") = G("$apppath") + "\\input\\pickins.txt"; "debug.txt" << L("filename") << "\n"; if (!G("$isdirrun") || G("$isfirstfile")) { G("out") = openfile(L("filename")); } else { G("out") = openfile(L("filename"),"app"); } @@CODE
@PATH _ROOT _termEntry _base @POST excise(1,1); @RULES _xNIL <- _xWHITE [s] ### (1) @@
@DECL ############################################### # General functions ############################################### AddUniqueCon(L("concept"),L("name")) { L("con") = findconcept(L("concept"),L("name")); if (!L("con")) L("con") = makeconcept(L("concept"),L("name")); return L("con"); } AddUniqueStr(L("concept"),L("attr"),L("value")) { if (L("value") && strval(L("concept"),L("attr")) != L("value")) addstrval(L("concept"),L("attr"),L("value")); } AddUniqueNum(L("concept"),L("attr"),L("value")) { "unique.txt" << L("attr") << " " << str(L("value")) << " " << conceptpath(L("concept")) << "\n"; L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("num") = getnumval(L("val")); "unique.txt" << " value: " << str(L("num")) << "\n"; if (L("num") == L("value")) return 0; L("val") = nextval(L("val")); } addnumval(L("concept"),L("attr"),L("value")); return 1; } AddUniqueConVal(L("concept"),L("attr"),L("value")) { "unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n"; L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("con") = getconval(L("val")); "unique.txt" << conceptname(L("con")) << "\n"; if (conceptpath(L("con")) == conceptpath(L("value"))) return 0; L("val") = nextval(L("val")); } addconval(L("concept"),L("attr"),L("value")); return 1; } CopyAttr(L("from"),L("to"),L("attr")) { L("from value") = strval(L("from"),L("attr")); if (L("from value")) { L("to value") = strval(L("to"),L("attr")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr"),L("from value")); } } CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) { L("from value") = strval(L("from"),L("attr from")); if (L("from value")) { L("to value") = strval(L("to"),L("attr to")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr to"),L("from value")); } } CopyConAttr(L("from"),L("to"),L("attr")) { L("from value") = conval(L("from"),L("attr")); if (L("from value")) { L("to value") = conval(L("to"),L("attr")); if (L("from value") && !L("to value")) addconval(L("to"),L("attr"),L("from value")); } } AttrValues(L("con"),L("attr")) { L("at") = findattr(L("con"),L("attr")); if (L("at")) return attrvals(L("at")); return 0; } LastChild(L("parent")) { L("child") = down(L("parent")); while (L("child")) { L("last") = L("child"); L("child") = next(L("child")); } return L("last"); } MakeCountCon(L("con"),L("count name")) { L("count name") = CountName(L("con"),L("count name")); return makeconcept(L("con"),L("count name")); } IncrementCount(L("con"),L("countname")) { L("count") = numval(L("con"),L("countname")); if (L("count")) { L("count") = L("count") + 1; replaceval(L("con"),L("countname"),L("count")); } else { addnumval(L("con"),L("countname"),1); L("count") = 1; } return L("count"); } CountName(L("con"),L("root")) { L("count") = IncrementCount(L("con"),L("root")); return L("root") + str(L("count")); } StripEndDigits(L("name")) { if (strisdigit(L("name"))) return 0; L("len") = strlength(L("name")) - 1; L("i") = L("len") - 1; L("str") = strpiece(L("name"),L("i"),L("len")); while (strisdigit(L("str")) && L("i")) { L("i")--; L("str") = strpiece(L("name"),L("i"),L("len")); } return strpiece(L("name"),0,L("i")); } ############################################### # KB Dump Functins ############################################### DumpKB(L("con"),L("file")) { L("dir") = G("$apppath") + "/kb/"; L("filename") = L("dir") + L("file") + ".kb"; if (!kbdumptree(L("con"),L("filename"))) { "kb.txt" << "FAILED dump: " << 
L("filename") << "\n"; } else { "kb.txt" << "DUMPED: " << L("filename") << "\n"; } } TakeKB(L("filename")) { L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb"; "kb.txt" << "Taking: " << L("path") << "\n"; if (take(L("path"))) { "kb.txt" << " Taken successfully: " << L("path") << "\n"; } else { "kb.txt" << " Taken FAILED: " << L("path") << "\n"; } } ChildCount(L("con")) { L("count") = 0; L("child") = down(L("con")); while (L("child")) { L("count")++; L("child") = next(L("child")); } return L("count"); } ############################################### # KBB DISPLAY FUNCTIONS ############################################### DisplayKB(L("top con"),L("full")) { L("file") = DisplayFileName(); DisplayKBRecurse(L("file"),L("top con"),0,L("full")); L("file") << "\n"; return L("top con"); } KBHeader(L("text")) { L("file") = DisplayFileName(); L("file") << "#######################\n"; L("file") << "# " << L("text") << "\n"; L("file") << "#######################\n\n"; } DisplayFileName() { if (num(G("$passnum")) < 10) { L("file") = "ana00" + str(G("$passnum")); }else if (num(G("$passnum")) < 100) { L("file") = "ana0" + str(G("$passnum")); } else { L("file") = "ana" + str(G("$passnum")); } L("file") = L("file") + ".kbb"; return L("file"); } DisplayKBRecurse(L("file"),L("top"),L("level"),L("full")) { if (L("level") == 0) { L("file") << conceptname(L("top")) << "\n"; } L("con") = down(L("top")); while (L("con")) { L("file") << SpacesStr(L("level")+1) << conceptname(L("con")); DisplayAttributes(L("file"),L("con"),L("full"),L("level")); L("file") << "\n"; if (down(L("con"))) { L("lev") = 1; DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("full")); } L("con") = next(L("con")); } } DisplayAttributes(L("file"),L("con"),L("full"),L("level")) { L("attrs") = findattrs(L("con")); if (L("attrs")) L("file") << ": "; if (L("full") && L("attrs")) L("file") << "\n"; L("first attr") = 1; while (L("attrs")) { L("vals") = attrvals(L("attrs")); if (!L("full") && !L("first attr")) { L("file") << ", "; } if (L("full")) { if (!L("first attr")) L("file") << "\n"; L("file") << SpacesStr(L("level")+2); } L("file") << attrname(L("attrs")) << "=["; L("first") = 1; while (L("vals")) { if (!L("first")) L("file") << ","; L("val") = getstrval(L("vals")); L("num") = getnumval(L("vals")); L("con") = getconval(L("vals")); if (L("con")) { L("file") << conceptpath(L("con")); } else if (!L("full") && strlength(L("val")) > 20) { L("shorty") = strpiece(L("val"),0,20); L("file") << L("shorty"); L("file") << "..."; if (strendswith(L("val"),"\"")) L("file") << "\""; } else if (L("num") > -1) { L("file") << str(L("num")); } else { L("file") << L("val"); } L("first") = 0; L("vals") = nextval(L("vals")); } L("file") << "]"; L("first attr") = 0; L("attrs") = nextattr(L("attrs")); } } # Because NLP++ doesn't allow for empty strings, # this function can only be called with "num" >= 1 SpacesStr(L("num")) { L("n") = 1; L("spaces") = " "; while (L("n") < L("num")) { L("spaces") = L("spaces") + " "; L("n")++; } return L("spaces"); } ############################################### # DICTIONARY FUNCTIONS ############################################### DictionaryClear() { G("dictionary path") = G("$apppath") + "\\kb\\user\\dictionary.kb"; G("dictionary") = openfile(G("dictionary path")); } DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) { L("file") = G("dictionary"); if (!dictfindword(L("word"))) L("file") << "add word \"" + L("word") + "\"\n"; L("file") << "ind attr\n" << findwordpath(L("word")) << "\n0\n"; L("file") << 
findwordpath(L("attrName")) << "\n"; if (L("attrType") == "str") L("file") << "pst\n" << L("value"); else if (L("attrType") == "num") L("file") << "pnum\n" << str(L("value")); else if (L("attrType") == "con") L("file") << "pcon\n" << conceptpath(L("value")); L("file") << "\nend ind\n\n"; } DictionaryEnd() { G("dictionary") << "\nquit\n\n"; closefile(G("dictionary")); } @@DECL
# Send output to standard output or user-supplied stream @CODE # In VisualText, output to a file.  Outside VisualText, output to user-supplied stream. if (interactive())    G("out") = "out.txt"; else    G("out") = cout(); G("out") << "Hello output stream!" << "\n"; @@CODE
@PATH _ROOT _paragraph _sentence @POST L("con") = AddOrder(N(1),X("sent",3)); pncopyvars(N(1),L("con")); @RULES _xNIL <- _xWILD [one match=(_name _have _for _money _buy _acquire _date _anaphora)] @@
@PATH _ROOT _edge @RULES _child <- _x ### (1) @@
# Convert string to XML or HTML string
"output.txt" << xmlstr("hello&bye") << "\n";
# Outputs: hello&amp;bye
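# A minimal usage sketch (assumptions: an earlier pass has built _LINE nodes, as other
# passes in this collection do, and "out.xml" is just an illustrative output file):
# escape each line's text with xmlstr() so characters like & and < stay well formed.
@NODES _ROOT

@POST
	"out.xml" << "<line>" << xmlstr(N("$text",1)) << "</line>" << "\n";
@RULES
_xNIL <- _LINE @@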
@CODE SaveKB("format.kbb",G("format"),2); SaveKB("everything.kbb",findroot(),2); @@CODE
@NODES _ROOT @POST L("leaf") = pndown(N(1)); pnmakevar(L("leaf"), "stop", 1); splice(1,1); @RULES _xNIL <- _stop ### (1) @@
@NODES _LINE

@PRE
#<8,8> length(5);
<10,10> length(4);
<13,13> cap();

@RULES

# Ex: New\_York,\_NY,\_
_cityStateZip <-
	_city [s layer=(_cityName)]
	\, [s opt]
	_xWHITE [star s]
	_state [one s layer=(_stateName)]
	\. [s opt]
	\, [s opt]
	_xWHITE [star s]
	_xNUM [s layer=(_zipCode)]
	\- [s opt]
	_xNUM [s opt layer=(_zipSuffix)]
	\, [s opt]
	_xWHITE [star s]
	_country [s opt layer=(_country)]
	@@

@POST
	S("city") = N("$text",1);
	S("state") = N("$text",4);
	single();

@RULES

# Ex: New\_York,\_NY,\_
_cityState <-
	_city [s layer=(_cityName)]
	\, [s opt]
	_xWHITE [star s]
	_state [one s layer=(_stateName)]
	@@

@PRE
<1,1> cap();
<3,3> cap();
#<10,10> length(5);
<12,12> length(4);

@POST
	group(1, 3, "_cityName");
	single();

@RULES

# Ex: New\_York,\_NY,\_
_cityStateZip <-
	_xALPHA [s opt]
	_xWHITE [star s]
	_xALPHA [s]
	\, [s opt]
	_xWHITE [star s]
	_state [one trig s layer=(_stateName)]
	\. [s opt]
	\, [s opt]
	_xWHITE [star s]
	_xNUM [s layer=(_zipCode)]
	\- [s opt]
	_xNUM [s opt layer=(_zipSuffix)]
	\, [s opt]
	_xWHITE [star s]
	_country [s opt layer=(_country)]
	@@

#@POST
#	S("city") = N("$text",1);
#	S("state") = N("$text",4);
#	single()
@NODES _LINE @POST S("con") = N("$text",2); X("con") = N("$text",2); single(); @RULES _con <- \" ### (1) _xWILD [fail=(_xEND)] ### (2) @@
@DECL ######## # FUNC: ADVLREGISTER # SUBJ: Register an adverbial in the kb. # INPUT: # OUTPUT: # NOTE: For adverbials not attached to a clause, eg, at the # start of a sentence. # Register for subsequent clauses. # Check for prior clauses. ######## advlregister( L("anode"), # Adverbial node. L("snode") # Sentence node. ) { if (!L("anode") || !L("snode")) return; #domadvlregister(L("anode"),L("snode")); # Get and update count of advls in sentence node. L("nadvls") = pnvar(L("snode"),"nadvls"); pnreplaceval(L("snode"),"nadvls", ++L("nadvls")); # Get sentence's kb concept. L("kb sent") = pnvar(L("snode"), "kb sent"); L("advl name") = "advl" + str(L("nadvls")); L("kb concept") = makeconcept(L("kb sent"), L("advl name")); replaceval(L("kb concept"),"type","advl"); replaceval(L("kb concept"),"zone",pnvar(L("anode"),"zone")); if (G("verbose")) "dump.txt" << "[advlreg: " << conceptname(L("kb sent")) << " " << L("advl name") << "]\n"; if (G("verbose")) "dump.txt" << "[text= " << pnvar(L("anode"),"$text") << "]\n"; # Advl pnode points to kb advl concept. pnreplaceval(L("anode"), "kb concept", L("kb concept")); # Some crosslinks. L("advls") = pnvar(L("snode"),"advls"); L("len") = arraylength(L("advls")); if (L("advls")) L("advls")[L("len")] = L("anode"); else L("advls") = L("anode"); # First one. pnreplaceval(L("snode"),"advls",L("advls")); } ######## # FUNC: CLAUSETRAVERSE # SUBJ: Manually traverse a clause in the parse tree. # INPUT: # OUTPUT: ######## clausetraverse( L("node"), # Root node within a clause L("cnode"), # Clause node. L("sent") # Current sentence. ) { if (!L("node") || !L("cnode")) return; if (G("verbose")) "dump.txt" << "[clausetraverse:]" << "\n"; # Get clause's kb concept. L("kb concept") = pnvar(L("cnode"), "kb concept"); if (!L("kb concept")) return; # Check for eventive np. # L("nm") = pnname(L("node")); if (L("nm") == "_np") { # Record actor or object. L("con") = objectregister(L("node"),L("cnode")); # Resolve with existing objects/events. resolveobjects(L("con"),G("objects"),G("events")); return; } if (L("nm") != "_clause" && G("verbose")) "dump.txt" << "[clausetraverse: concept=" << L("nm") << "]\n"; # Get first child node. L("n") = pndown(L("node")); while (L("n")) { L("nname") = pnname(L("n")); if (L("nname") == "_np" || L("nname") == "_nps") { if (pnvar(L("n"),"eventive")) domnp(L("n"),L("sent")); pnreplaceval(L("sent"),"last np", L("n")); # Record actor or object. L("con") = objectregister(L("n"),L("cnode")); # Resolve with existing objects/events. resolveobjects(L("con"),G("objects"),G("events")); } else if (L("nname") == "_vg") { # Record act. L("nacts") = pnvar(L("cnode"),"nacts"); pnreplaceval(L("cnode"),"nacts", ++L("nacts")); L("con") = makeconcept(L("kb concept"),"act" + str(L("nacts"))); replaceval(L("con"),"type","act"); replaceval(L("con"),"text", strtolower(pnvar(L("n"),"$text"))); pnreplaceval(L("n"),"kb act",L("con")); # Only handling singletons for the moment. if (L("nsem") = pnvar(L("n"),"sem")) replaceval(L("con"),"sem", L("nsem")); if (L("tmp") = pnvar(L("n"),"act val")) replaceval(L("con"),"act val",L("tmp")); if (pnvar(L("n"),"passive")) replaceval(L("con"),"passive",1); if (pnvar(L("n"),"neg")) replaceval(L("con"),"negative",1); } else if (L("nname") == "_advl" && pnvar(L("n"),"pp")) { clauseadvltraverse(L("n"),L("cnode"),L("sent")); } else if (L("nname") == "_advl" && pnvar(L("n"),"pattern") == "that-clause" ) { # Assume np+that+clause pattern. 
L("clause") = pnvar(L("n"),"clause"); clauseregister(L("clause"), L("sent")); clausetraverse(L("clause"),L("clause"),L("sent")); } else if (L("nname") == "_adj") # assume np vg adj pattern { # Record state. L("con") = stateregister(L("n"),L("cnode")); } else if (L("nname") == "_clause") { "clauses.txt" << pnvar(L("n"),"$text") << "\n\n"; clauseregister(L("n"), L("sent")); clausetraverse(L("n"), L("n"), L("sent")); } else # Traverse advl etc. clausetraverse(L("n"), L("cnode"), L("sent")); L("n") = pnnext(L("n")); } # If the current item that we've traversed is a clause, # fix up the event semantics for it. semclause(L("node"),L("sent")); } ######## # FUNC: CLAUSEADVLTRAVERSE # SUBJ: Manually traverse an adverbial in the parse tree. # INPUT: # OUTPUT: ######## clauseadvltraverse( L("n"), # Adverbial node. L("cnode"), L("sent") # Current sentence. ) { if (G("verbose")) "dump.txt" << "[clauseadvltraverse:]" << "\n"; domadvl(L("n"), L("sent")); if (L("nps") = pnvar(L("n"),"pn nps")) { L("advl np") = L("nps")[0]; pnreplaceval(L("sent"),"last np", L("nps")[0]); # (Reverse order...) if (L("cnode")) { L("con") = objectregister(L("advl np"),L("cnode")); L("np con") = resolveobjects(L("con"),G("objects"), G("events")); if (!L("np con") && G("verbose")) "dump.txt" << "no np con" << "\n"; } # If np belongs to an eventive, place it there. if (L("ev") = pnvar(L("advl np"),"pn eventive")) { if (G("verbose")) "dump.txt" << pnvar(L("advl np"),"$text") << " =event=> " << pnvar(L("ev"),"$text") << "\n"; L("ev con") = pnvar(L("ev"),"kb obj"); L("resolver") = conval(L("ev con"),"resolver"); replaceval(L("resolver"),"actor",L("np con")); replaceval(L("np con"),"event",L("resolver")); } } } ######## # FUNC: ADVLTRAVERSE # SUBJ: Handle a non-clause adverbial. # INPUT: # OUTPUT: # NOTE: Not a "traversal", though named that for consistency. # May involve lists and other complexities in the future. ######## advltraverse( L("n"), # Adverbial node. L("sent") # Current sentence. ) { if (G("verbose")) "dump.txt" << "[advltraverse:]" << "\n"; if (!L("n") || !L("sent")) return; domadvl(L("n"),L("sent")); if (L("nps") = pnvar(L("n"),"pn nps")) { L("advl np") = L("nps")[0]; pnreplaceval(L("sent"),"last np", L("nps")[0]); # (Reverse order...) L("con") = objectregister(L("advl np"),L("n")); L("np con") = resolveobjects(L("con"),G("objects"), G("events")); if (!L("np con") && G("verbose")) "dump.txt" << "no np con" << "\n"; # If np belongs to an eventive, place it there. if (L("ev") = pnvar(L("advl np"),"pn eventive")) { if (G("verbose")) "dump.txt" << pnvar(L("advl np"),"$text") << " =event=> " << pnvar(L("ev"),"$text") << "\n"; L("ev con") = pnvar(L("ev"),"kb obj"); L("resolver") = conval(L("ev con"),"resolver"); replaceval(L("resolver"),"actor",L("np con")); replaceval(L("np con"),"event",L("resolver")); } } } ######## # FUNC: ADVLSHANDLE # SUBJ: Traverse tree of adverbials. ######## advlshandle( L("n"), # Adverbial node. L("sent") # Current sentence. ) { if (!L("n") || !L("sent")) return; L("zone") = pnvar(L("n"),"zone"); if (pnvar(L("n"),"advl list")) # List of advls. { L("list") = pndown(L("n")); while (L("list")) { if (L("zone")) pnreplaceval(L("list"),"zone",L("zone")); advlshandle(L("list"),L("sent")); L("list") = pnnext(L("list")); } return; } # Non-list adverbial here. advlhandle(L("n"),L("sent")); } ######## # FUNC: ADVLHANDLE # SUBJ: Handle a clause-independent adverbial. # NOTE: Taken from qclause ######## advlhandle( L("n"), # Adverbial node. L("sent") # Current sentence. 
) { if (!L("n") || !L("sent")) return; L("zone") = pnvar(L("n"),"zone"); L("clause") = pnvar(L("n"),"clause"); L("clauses") = pnvar(L("n"),"clauses"); if (L("clause")) { "clauses.txt" << pnvar(L("n"),"$text") << "\n\n"; if (L("clause")) { if (L("zone")) pnreplaceval(L("clause"),"zone",L("zone")); clauseregister(L("clause"),L("sent")); L("last np") = prevnp(L("sent")); clausetraverse(L("clause"),L("clause"),L("sent")); clauseresolve(L("clause"),L("sent"),L("last np")); pnreplaceval(L("sent"),"clause",L("clause")); } else if (L("clauses")) { L("count") = arraylength(L("clauses")); L("ii") = 0; while (L("ii") < L("count")) { L("cls") = L("clauses")[L("ii")]; if (L("zone")) pnreplaceval(L("cls"),"zone",L("zone")); clauseregister(L("cls"),L("sent")); L("last np") = prevnp(L("sent")); clausetraverse(L("cls"),L("cls"),L("sent")); clauseresolve(L("cls"),L("sent"),L("last np")); pnreplaceval(L("sent"),"clause",L("cls")); ++L("ii"); } } } else { # Get object, perhaps semantic case, from pp. # Can register to the current sentence at least. # Could check if before, betwixt, after clauses. if (pnvar(L("n"),"pp")) # Prepositional phrase { # Register adverbial as a kind of nearly empty clause. advlregister(L("n"),L("sent")); # Handle non-clause adverbials. advltraverse(L("n"),L("sent")); } } } ######## # FUNC: SEMCLAUSE # SUBJ: Handle semantics of given clause. # INPUT: # RET: # NOTE: ######## semclause(L("clause"),L("sent")) { if (!L("clause") || !L("sent")) return; if (pnname(L("clause")) != "_clause") return; if (G("verbose")) "dump.txt" << "[semclause: " << pnvar(L("clause"),"$text") << "]\n"; # If clause has an immediately preceding np resolving it, # check active-passive etc. if (L("np") = pnvar(L("clause"),"np-that")) { if (G("verbose")) "dump.txt" << "np-that=" << pnvar(L("np"),"$text") << "\n"; if (!pnvar(L("clause"),"passive")) # Assume passive noted here. { if (!pnvar(L("clause"),"actor")) { # Resolve as actor. pnreplaceval(L("clause"),"actor",L("np")); } } else # passive present. { if (!pnvar(L("clause"),"obj")) { # Resolve as object. pnreplaceval(L("clause"),"obj",L("np")); } } } # If clause has a preceding adverbial, throw it into a date/loc # slot (or something like that). L("advls") = pnvar(L("sent"),"advls"); if (L("advls")) # 09/17/02 AM. { if (G("verbose")) "dump.txt" << "[Found previous advl.]" << "\n"; L("advl") = L("advls")[0]; # Grab only the 1st, for now. L("loc") = pnvar(L("advl"),"pn nps"); pnreplaceval(L("clause"),"loc",L("loc")); } # If clause isn't registered as an event, register it. L("kb concept") = pnvar(L("clause"),"kb concept"); if (!L("kb concept")) return; L("event con") = resolveevent(L("kb concept"),G("events")); if (!L("event con")) return; # Error. # Add the pieces of the event. 
if (L("act") = pnvar(L("clause"),"act")) { if (L("act con") = pnvar(L("act"),"kb act")) { if (L("res") = conval(L("act con"),"resolver")) L("act con") = L("res"); replaceval(L("event con"),"act",L("act con")); } } if (L("actor") = pnvar(L("clause"),"actor")) { if (L("actor con") = pnvar(L("actor"),"kb obj")) { if (L("res") = conval(L("actor con"),"resolver")) L("actor con") = L("res"); replaceval(L("event con"),"actor",L("actor con")); } } if (L("obj") = pnvar(L("clause"),"dobj")) { if (L("obj con") = pnvar(L("obj")[0],"kb obj")) { if (L("res") = conval(L("obj con"),"resolver")) L("obj con") = L("res"); replaceval(L("event con"),"obj",L("obj con")); } } if (L("iobj") = pnvar(L("clause"),"iobj")) { if (L("iobj con") = pnvar(L("iobj")[0],"kb obj")) { if (L("res") = conval(L("iobj con"),"resolver")) L("iobj con") = L("res"); replaceval(L("event con"),"iobj",L("iobj con")); } } if (L("adj") = pnvar(L("clause"),"adj role")) { if (G("verbose")) "dump.txt" << "adj= " << pnvar(L("adj"),"$text") << "\n"; if (L("state con") = pnvar(L("adj"),"kb con")) { # No resolver mechanism for attrs, states... replaceval(L("event con"),"state",L("state con")); } } if (L("loc") = pnvar(L("clause"),"loc")) { if (L("loc con") = pnvar(L("loc"),"kb obj")) { if (L("res") = conval(L("loc con"),"resolver")) L("loc con") = L("res"); replaceval(L("event con"),"loc",L("loc con")); } } } ######## # FUNC: CLAUSERESOLVE # SUBJ: Resolve inter-clause references. # INPUT: # OUTPUT: ######## clauseresolve( L("cnode"), # Clause node. L("sent"), # Current sentence. L("prev np") # Previous np in sentence. ) { if (!L("cnode") || !L("sent")) return; # Get clause's kb concept. L("kb concept") = pnvar(L("cnode"), "kb concept"); if (!L("kb concept")) return; # Get event resolver. L("event") = conval(L("kb concept"),"resolver"); if (!L("event")) return; L("pattern") = pnvar(L("cnode"),"pattern"); if (L("pattern") == "ellipted-that-clause") { # Look for the immediately preceding np, structurally. if (G("verbose")) "dump.txt" << "ellipted-that-clause\n"; # Get prev np in sent. if (!L("prev np")) return; if (G("verbose")) "dump.txt" << "clauseresolve: prev np=" << pnvar(L("prev np"), "$text") << "\n"; # Assume something like "np vg-passive" if (!(L("con") = pnvar(L("prev np"),"kb obj") )) return; if (!(L("res") = conval(L("con"),"resolver") )) return; replaceval(L("event"),"obj",L("res")); return; } # Get prior clause from current sentence, if any. if (!(L("cprev") = pnvar(L("sent"),"clause"))) return; # Get prev clause's kb concept. L("kb cprev") = pnvar(L("cprev"), "kb concept"); if (!L("kb cprev")) return; # Get event resolver. L("prev event") = conval(L("kb cprev"),"resolver"); if (!L("prev event")) return; if (G("verbose")) "dump.txt" << "[clauseresolve: got prior clause " << conceptname(L("kb cprev")) << "]\n"; # Only handle "that-clause. if (L("pattern") != "that-clause") return; if (G("verbose")) "dump.txt" << "[clauseresolve: got that-clause]" << "\n"; # If previous already has an object, can't fill it here. if (conval(L("prev event"),"obj")) return; # Fill previous clause's object with current clause. replaceval(L("prev event"),"obj",L("event")); } ######## # FUNC: OBJECTREGISTER # SUBJ: Record object in semantics. # INPUT: # RET: con - Object's concept. # NOTE: ######## #objectregister( # L("n"), # Object's node. # L("cnode") # Object's clause. # ) #{ #if (!L("n") || !L("cnode")) # return 0; # #"dump.txt" << "[objectregister:]" << "\n"; # # Tracking instances of "I". 
#if (pnvar(L("n"),"stem") == "i") # ++G("1st person"); # # Get clause's kb concept. #L("kb concept") = pnvar(L("cnode"), "kb concept"); #if (!L("kb concept")) # return 0; # # Record actor or object. #L("nobjs") = pnvar(L("cnode"),"nobjs"); #pnreplaceval(L("cnode"),"nobjs", ++L("nobjs")); #L("con") = makeconcept(L("kb concept"),"obj" + str(L("nobjs"))); #replaceval(L("con"),"type","obj"); #replaceval(L("con"),"text", # strtolower(pnvar(L("n"),"$text"))); #if (L("sem") = pnvar(L("n"),"sem")) # replaceval(L("con"),"sem",strtolower(L("sem"))); #pnreplaceval(L("n"),"kb obj",L("con")); # #if (pnvar(L("n"),"eventive")) # # replaceval(L("con"),"eventive",1); # # # Handle singleton. #if (L("nsem") = pnvar(L("n"),"sem")) # replaceval(L("con"),"sem", L("nsem")); # #if (pnvar(L("n"),"pro")) # If a pronoun. # replaceval(L("con"),"pro",1); # #domcopynodetocon(L("n"),L("con")); # # If a list of nps, handle it. # (nps were collected in REVERSE order.) # #if (L("count") = pnvar(L("n"),"count")) # replaceval(L("con"),"count",L("count")); #L("nps") = pnvar(L("n"),"nps"); # List of np nodes. #L("num") = L("count"); #while (--L("num") >= 0) # { # # Register each object. # # Link to group concept also... # L("one") = L("nps")[L("num")]; # L("obj") = objectregister(L("one"),L("cnode")); # # # Cross link. # addconval(L("con"),"cons",L("obj")); # replaceval(L("obj"),"group",L("con")); # } # #return L("con"); #} ######## # FUNC: RESOLVEOBJECTS # SUBJ: Resolve object reference to objects in the text. # INPUT: # RET: con - Resolving object. # NOTE: Requiring precise text compare, for now. ######## resolveobjects( L("ref"), # An object reference. L("objects"), # KB concept managing objects. L("events") # KB concept managing events. ) { if (!L("ref")) return 0; if (G("verbose")) "dump.txt" << "[resolveobjects: ref=" << conceptname(L("ref")) << "]\n"; # If not eventive... # Resolve object with existing objects in the text. if (numval(L("ref"),"pro")) L("ret") = resolvepro(L("ref"),L("objects"),L("events")); else if (!numval(L("ref"),"eventive")) L("ret") = resolveobject(L("ref"),G("objects")); # Else if eventive... # Resolve with existing events in the text. # else L("ret") = resolveevent(L("ref"),L("events")); L("count") = numval(L("ref"),"count"); if (L("count") <= 1) # Single object. return L("ret"); L("num") = 0; L("cons") = findvals(L("ref"),"cons"); while (L("num") < L("count")) { L("con") = getconval(L("cons")); if (numval(L("con"),"pro")) # If a pronoun... resolvepro(L("con"),L("objects"),L("events")); else if (numval(L("con"),"eventive")) resolveevent(L("con"),L("events")); else resolveobject(L("con"),L("objects")); ++L("num"); L("cons") = nextval(L("cons")); } return L("ret"); } ######## # FUNC: RESOLVEPRO # SUBJ: Resolve pronoun reference to objects in the text. # INPUT: # RET: con - Resolving object. # NOTE: May handle other anaphoric utterances, eventually. ######## resolvepro( L("ref"), # An object reference. L("objects"), # KB concept managing objects. L("events") # KB concept managing events. ) { if (!L("objects") || !L("events") || !L("ref")) return 0; L("list") = down(L("objects")); if (!L("list")) # Empty objects list. { if (G("verbose")) "dump.txt" << "[resolvepro: No objects.]\n"; # Couldn't resolve. # Could be something like "It is raining...." return 0; } # Traverse the list of existing objects, looking for a mergable # object. (Todo: look at prior events, states also...) L("merged") = 0; # Not merged with an existing object. L("done") = 0; L("cand") = L("list"); # Candidate object. 
# Go to end of the list. while (next(L("cand"))) L("cand") = next(L("cand")); while (!L("done")) { # Trivial heuristic. Look back to the nearest object # that doesn't conflict with person, plural, etc. # Examine individual pronouns I, we, you, etc.... # Should use hierarchy concepts as part of the merge test... if (mergableobjs(L("ref"),L("cand")) ) { if (G("verbose")) "dump.txt" << "[resolvepro: Trivial proximity heur, merge with " << conceptname(L("cand")) << "]\n"; # Successful merge. L("merged") = 1; L("done") = 1; addconval(L("cand"),"refs",L("ref")); replaceval(L("ref"),"resolver",L("cand")); return L("cand"); } L("cand") = prev(L("cand")); if (!L("cand")) L("done") = 1; } } if (!L("merged")) # Didn't find an existing object. { return 0; } ######## # FUNC: MERGABLEOBJS # SUBJ: See if object reference can be merged with candidate. # INPUT: # RET: bool - 1 if mergable, else 0. # NOTE: ######## mergableobjs(L("ref"), # Object reference. L("cand")) # Object in kb list. { if (!L("ref") || !L("cand")) return 0; L("sem") = strval(L("cand"),"sem"); # Don't merge with dates, for now! if (L("sem") == "date") return 0; # Don't merge with locations for now! # Todo: better get of location semantics. if (L("sem") == "country") return 0; # Todo: Put more tests here, like plural, etc. return 1; } ######## # FUNC: STATEREGISTER # SUBJ: Record state in semantics. # INPUT: # RET: con - state's concept. # NOTE: ######## stateregister( L("n"), # State node. L("cnode") # Clause. ) { if (!L("n") || !L("cnode")) return 0; # Get clause's kb concept. L("kb concept") = pnvar(L("cnode"), "kb concept"); if (!L("kb concept")) return 0; # Record actor or object. L("nstates") = pnvar(L("cnode"),"nstates"); pnreplaceval(L("cnode"),"nstates", ++L("nstates")); L("con") = makeconcept(L("kb concept"),"state" + str(L("nstates"))); replaceval(L("con"),"type","state"); replaceval(L("con"),"text", strtolower(pnvar(L("n"),"$text"))); pnreplaceval(L("n"),"kb con",L("con")); # Handle singleton. if (L("nsem") = pnvar(L("n"),"sem")) replaceval(L("con"),"sem", L("nsem")); return L("con"); } ######## # FUNC: RESOLVEEVENT # SUBJ: Resolve eventive np reference to events in the text. # INPUT: # RETURN: event_con - Resolved event concept. # NOTE: Requiring precise text compare, for now. ######## resolveevent( L("ref"), # An eventive np reference. L("events") # KB concept managing events in current text. ) { if (!L("events") || !L("ref")) return 0; L("list") = down(L("events")); if (!L("list")) # Empty events list. { # Just add the reference and done. return newevent(L("ref"),L("events")); } # Traverse the list of existing events, looking for a mergable # event. For now, requiring exact text match. L("merged") = 0; # Not merged with an existing event. L("done") = 0; L("cand") = L("list"); # Candidate object. L("rtext") = strval(L("ref"),"text"); while (!L("done")) { # Should use hierarchy concepts as part of the merge test... # "dump.txt" << "[resolveevent: cand=" # << strval(L("cand"),"text") # << " ref=" << L("rtext") << "]\n"; if (L("rtext") && strval(L("cand"),"text") == L("rtext") ) { # Successful merge. L("merged") = 1; L("done") = 1; addconval(L("cand"),"refs",L("ref")); replaceval(L("ref"),"resolver",L("cand")); dommergeevent(L("ref"),L("cand")); return L("cand"); } L("cand") = next(L("cand")); if (!L("cand")) L("done") = 1; } # Didn't find an existing object. # Just add the reference and done. return newevent(L("ref"),L("events")); } ######## # FUNC: NEWEVENT # SUBJ: Register a new (unmergeable) event in semantics. 
# INPUT: # OUTPUT: ######## newevent( L("ref"), # Reference concept. L("events") # Kb list of event concepts to update. ) { L("ct") = numval(L("events"),"count"); replaceval(L("events"),"count", ++L("ct")); # inc count. L("nm") = "event" + str(L("ct")); if (G("verbose")) "dump.txt" << "[new event: " << L("nm") << "]\n"; L("con") = makeconcept(L("events"),L("nm")); replaceval(L("con"),"type","event"); L("rtext") = strval(L("ref"),"text"); if (L("rtext")) replaceval(L("con"),"text",L("rtext")); addconval(L("con"),"refs",L("ref")); # Point to ref concept. replaceval(L("ref"),"resolver",L("con")); # Application-specific fixups. domnewevent(L("ref"),L("con")); # 09/18/02 AM. return L("con"); } ######## # FUNC: COMPLEXTRAVERSE # SUBJ: Manually traverse a complex clause in the parse tree. # INPUT: # OUTPUT: ######## complextraverse( L("cxnode"), # Complex node. L("snode") # Sentence node. ) { if (!L("cxnode") || !L("snode")) return; L("n") = pndown(L("cxnode")); while (L("n")) { if (pnname(L("n")) == "_clause") { "clauses.txt" << pnvar(L("n"),"$text") << "\n\n"; L("last np") = prevnp(L("snode")); # 09/22/02 AM. pnreplaceval(L("n"),"zone",pnvar(L("cxnode"),"zone")); clauseregister(L("n"),L("snode")); clausetraverse(L("n"),L("n"),L("snode")); clauseresolve(L("n"),L("snode"),L("last np")); } L("n") = pnnext(L("n")); } } @CODE L("hello") = 0; @@CODE
@NODES _ROOT @POST S("con") = makeconcept(G("word"),N("header",1)); addstrval(S("con"),"language",N("header", 1)); single(); @RULES _language <- _headerZone ### (1) @@
# Find the entire path of the dictionary concept for the given string.
# (If the word is not present, it is not added.)
L("return_str") = findwordpath(L("str"));
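# A small sketch of one way to use findwordpath() (the word and file name here are
# illustrative): look the word up without adding it, and add it explicitly only when
# the lookup comes back empty.
@CODE
L("str") = "epistemology";
L("return_str") = findwordpath(L("str"));
if (!L("return_str"))
	{
	addword(L("str"));	# add the missing word ourselves
	L("return_str") = findwordpath(L("str"));
	}
"output.txt" << L("return_str") << "\n";
@@CODE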
@NODES _ROOT @PRE <1,1> var("start"); <7,7> var("end"); @RULES _attr <- _LINE ### (1) _LINE [group="_concept"] ### (2) _LINE ### (3) _LINE [group="_attrConcept"] ### (4) _LINE [group="_type"] ### (5) _LINE [group="_value"] ### (6) _LINE ### (7) @@
@DECL ############################################### # General functions ############################################### AddUniqueCon(L("concept"),L("name")) { L("con") = findconcept(L("concept"),L("name")); if (!L("con")) L("con") = makeconcept(L("concept"),L("name")); return L("con"); } AddUniqueStr(L("concept"),L("attr"),L("value")) { if (L("value")) { L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("str") = getstrval(L("val")); if (L("str") == L("value")) return 0; L("val") = nextval(L("val")); } addstrval(L("concept"),L("attr"),L("value")); return 1; } return 0; } AddUniqueNum(L("concept"),L("attr"),L("value")) { if (L("value")) { L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("num") = getnumval(L("val")); if (L("num") == L("value")) return 0; L("val") = nextval(L("val")); } addnumval(L("concept"),L("attr"),L("value")); return 1; } return 0; } AddUniqueConVal(L("concept"),L("attr"),L("value")) { "unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n"; L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("con") = getconval(L("val")); "unique.txt" << conceptname(L("con")) << "\n"; if (conceptpath(L("con")) == conceptpath(L("value"))) return 0; L("val") = nextval(L("val")); } addconval(L("concept"),L("attr"),L("value")); return 1; } PathToConcept(L("parent"),L("hier")) { L("cons") = split(L("hier")," "); L("i") = 0; L("con") = L("parent"); while (L("cons")[L("i")]) { L("c") = L("cons")[L("i")]; L("name") = strsubst(L("c"),"\"",0); if (L("name") != "concept") L("con") = AddUniqueCon(L("con"),L("name")); L("i")++; } return L("con"); } CopyAttr(L("from"),L("to"),L("attr")) { L("from value") = strval(L("from"),L("attr")); if (L("from value")) { L("to value") = strval(L("to"),L("attr")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr"),L("from value")); } } CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) { L("from value") = strval(L("from"),L("attr from")); if (L("from value")) { L("to value") = strval(L("to"),L("attr to")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr to"),L("from value")); } } CopyConAttr(L("from"),L("to"),L("attr")) { L("from value") = conval(L("from"),L("attr")); if (L("from value")) { L("to value") = conval(L("to"),L("attr")); if (L("from value") && !L("to value")) addconval(L("to"),L("attr"),L("from value")); } } AttrValues(L("con"),L("attr")) { L("at") = findattr(L("con"),L("attr")); if (L("at")) return attrvals(L("at")); return 0; } ValCount(L("attr")) { L("vals") = attrvals(L("attr")); while (L("con")) { L("count")++; L("con") = nextval(L("con")); } return L("count"); } LastChild(L("parent")) { L("child") = down(L("parent")); while (L("child")) { L("last") = L("child"); L("child") = next(L("child")); } return L("last"); } MakeCountCon(L("con"),L("count name")) { L("count name") = CountName(L("con"),L("count name")); return makeconcept(L("con"),L("count name")); } IncrementCount(L("con"),L("countname")) { L("count") = numval(L("con"),L("countname")); if (L("count")) { L("count") = L("count") + 1; replaceval(L("con"),L("countname"),L("count")); } else { addnumval(L("con"),L("countname"),1); L("count") = 1; } return L("count"); } CountName(L("con"),L("root")) { L("count") = IncrementCount(L("con"),L("root")); return L("root") + str(L("count")); } StripEndDigits(L("name")) { if (strisdigit(L("name"))) return 0; L("len") = strlength(L("name")) - 1; L("i") = L("len") - 1; L("str") = 
strpiece(L("name"),L("i"),L("len")); while (strisdigit(L("str")) && L("i")) { L("i")--; L("str") = strpiece(L("name"),L("i"),L("len")); } return strpiece(L("name"),0,L("i")); } ############################################### # KB Dump Functins ############################################### DumpKB(L("con"),L("file")) { L("dir") = G("$apppath") + "/kb/"; L("filename") = L("dir") + L("file") + ".kb"; if (!kbdumptree(L("con"),L("filename"))) { "kb.txt" << "FAILED dump: " << L("filename") << "\n"; } else { "kb.txt" << "DUMPED: " << L("filename") << "\n"; } } TakeKB(L("filename")) { L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb"; "kb.txt" << "Taking: " << L("path") << "\n"; if (take(L("path"))) { "kb.txt" << " Taken successfully: " << L("path") << "\n"; } else { "kb.txt" << " Taken FAILED: " << L("path") << "\n"; } } ChildCount(L("con")) { L("count") = 0; L("child") = down(L("con")); while (L("child")) { L("count")++; L("child") = next(L("child")); } return L("count"); } ############################################### # KBB DISPLAY FUNCTIONS ############################################### ############################################### # display type: # 0 compact with ellipses on long attr values # 1 full, more spread out # 2 compact without ellipses on long attr values ############################################### DisplayKB(L("top con"),L("display type")) { L("file") = DisplayFileName(); DisplayKBRecurse(L("file"),L("top con"),0,L("display type")); L("file") << "\n"; return L("top con"); } KBHeader(L("text")) { L("file") = DisplayFileName(); L("file") << "#######################\n"; L("file") << "# " << L("text") << "\n"; L("file") << "#######################\n\n"; } DisplayFileName() { if (num(G("$passnum")) < 10) { L("file") = "ana00" + str(G("$passnum")); }else if (num(G("$passnum")) < 100) { L("file") = "ana0" + str(G("$passnum")); } else { L("file") = "ana" + str(G("$passnum")); } L("file") = L("file") + ".kbb"; return L("file"); } DisplayKBRecurse(L("file"),L("parent"),L("level"),L("display type")) { if (L("level") == 0) { L("file") << conceptname(L("parent")) << "\n"; } L("con") = down(L("parent")); while (L("con")) { L("name") = conceptname(L("con")); L("file") << SpacesStr(L("level")+1); if (DisplayValNeedsQuote(L("name"))) L("file") << "\""; L("file") << L("name"); if (DisplayValNeedsQuote(L("name"))) L("file") << "\""; DisplayAttributes(L("file"),L("con"),L("display type"),L("level")); L("file") << "\n"; if (down(L("con"))) { L("lev") = 1; DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("display type")); } L("con") = next(L("con")); } } DisplayAttributes(L("file"),L("con"),L("display type"),L("level")) { L("attrs") = findattrs(L("con")); if (L("attrs")) L("file") << ": "; if (L("display type") == 1 && L("attrs")) L("file") << "\n"; L("first attr") = 1; while (L("attrs")) { L("vals") = attrvals(L("attrs")); L("count") = ValCount(L("attrs")); if (L("display type") != 1 && !L("first attr")) { L("file") << ", "; } if (L("display type") == 1) { if (!L("first attr")) L("file") << "\n"; L("file") << SpacesStr(L("level")+2); } L("file") << attrname(L("attrs")) << "="; L("first") = 1; while (L("vals")) { L("val") = getstrval(L("vals")); L("num") = getnumval(L("vals")); L("con") = getconval(L("vals")); if (!L("first")) L("file") << ","; else if (L("count") > 1 || L("con")) L("file") << "["; if (L("con")) { if (L("first")) L("file") << "["; L("file") << conceptpath(L("con")); } else if (L("display type") == 0 && strlength(L("val")) > 20) { L("shorty") = 
strpiece(L("val"),0,20); L("file") << L("shorty"); L("file") << "..."; if (strendswith(L("val"),"\"")) L("file") << "\""; } else if (L("num") > -1) { L("file") << str(L("num")); } else { if (DisplayValNeedsQuote(L("val"))) L("file") << "\""; L("file") << L("val"); if (DisplayValNeedsQuote(L("val"))) L("file") << "\""; } L("first") = 0; L("vals") = nextval(L("vals")); } if (L("con")) L("file") << "]"; L("first attr") = 0; L("attrs") = nextattr(L("attrs")); } } DisplayValNeedsQuote(L("str")) { if (strcontains(" ",L("str")) || strcontains("[",L("str")) || strcontains("]",L("str")) || strcontains(".",L("str"))) return 1; return 0; } # Because NLP++ doesn't allow for empty strings, # this function can only be called with "num" >= 1 SpacesStr(L("num")) { L("n") = 1; L("spaces") = " "; while (L("n") < L("num")) { L("spaces") = L("spaces") + " "; L("n")++; } return L("spaces"); } ############################################### # DICTIONARY FUNCTIONS ############################################### DictionaryStart() { G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb"; G("attrs") = openfile(G("attrs path")); } DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) { addword(L("word")); addword(L("attrName")); G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n"; G("attrs") << findwordpath(L("attrName")) << "\n"; if (L("attrType") == "str") G("attrs") << "pst\n" << "\"" << L("value") << "\""; else if (L("attrType") == "num") G("attrs") << "pnum\n" << str(L("value")); else if (L("attrType") == "con") G("attrs") << "pcon\n" << conceptpath(L("value")); G("attrs") << "\nend ind\n\n"; } DictionaryEnd() { G("attrs") << "\nquit\n\n"; closefile(G("attrs")); } @@DECL
@DECL ############################################### # General functions ############################################### AddUniqueCon(L("concept"),L("name")) { L("con") = findconcept(L("concept"),L("name")); if (!L("con")) L("con") = makeconcept(L("concept"),L("name")); return L("con"); } AddUniqueStr(L("concept"),L("attr"),L("value")) { if (L("value") && strval(L("concept"),L("attr")) != L("value")) addstrval(L("concept"),L("attr"),L("value")); } AddUniqueNum(L("concept"),L("attr"),L("value")) { "unique.txt" << L("attr") << " " << str(L("value")) << " " << conceptpath(L("concept")) << "\n"; L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("num") = getnumval(L("val")); "unique.txt" << " value: " << str(L("num")) << "\n"; if (L("num") == L("value")) return 0; L("val") = nextval(L("val")); } addnumval(L("concept"),L("attr"),L("value")); return 1; } AddUniqueConVal(L("concept"),L("attr"),L("value")) { "unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n"; L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("con") = getconval(L("val")); "unique.txt" << conceptname(L("con")) << "\n"; if (conceptpath(L("con")) == conceptpath(L("value"))) return 0; L("val") = nextval(L("val")); } addconval(L("concept"),L("attr"),L("value")); return 1; } CopyAttr(L("from"),L("to"),L("attr")) { L("from value") = strval(L("from"),L("attr")); if (L("from value")) { L("to value") = strval(L("to"),L("attr")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr"),L("from value")); } } CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) { L("from value") = strval(L("from"),L("attr from")); if (L("from value")) { L("to value") = strval(L("to"),L("attr to")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr to"),L("from value")); } } CopyConAttr(L("from"),L("to"),L("attr")) { L("from value") = conval(L("from"),L("attr")); if (L("from value")) { L("to value") = conval(L("to"),L("attr")); if (L("from value") && !L("to value")) addconval(L("to"),L("attr"),L("from value")); } } AttrValues(L("con"),L("attr")) { L("at") = findattr(L("con"),L("attr")); if (L("at")) return attrvals(L("at")); return 0; } LastChild(L("parent")) { L("child") = down(L("parent")); while (L("child")) { L("last") = L("child"); L("child") = next(L("child")); } return L("last"); } MakeCountCon(L("con"),L("count name")) { L("count name") = CountName(L("con"),L("count name")); return makeconcept(L("con"),L("count name")); } IncrementCount(L("con"),L("countname")) { L("count") = numval(L("con"),L("countname")); if (L("count")) { L("count") = L("count") + 1; replaceval(L("con"),L("countname"),L("count")); } else { addnumval(L("con"),L("countname"),1); L("count") = 1; } return L("count"); } CountName(L("con"),L("root")) { L("count") = IncrementCount(L("con"),L("root")); return L("root") + str(L("count")); } StripEndDigits(L("name")) { if (strisdigit(L("name"))) return 0; L("len") = strlength(L("name")) - 1; L("i") = L("len") - 1; L("str") = strpiece(L("name"),L("i"),L("len")); while (strisdigit(L("str")) && L("i")) { L("i")--; L("str") = strpiece(L("name"),L("i"),L("len")); } return strpiece(L("name"),0,L("i")); } ############################################### # KB Dump Functins ############################################### DumpKB(L("con"),L("file")) { L("dir") = G("$apppath") + "/kb/"; L("filename") = L("dir") + L("file") + ".kb"; if (!kbdumptree(L("con"),L("filename"))) { "kb.txt" << "FAILED dump: " << 
L("filename") << "\n"; } else { "kb.txt" << "DUMPED: " << L("filename") << "\n"; } } TakeKB(L("filename")) { L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb"; "kb.txt" << "Taking: " << L("path") << "\n"; if (take(L("path"))) { "kb.txt" << " Taken successfully: " << L("path") << "\n"; } else { "kb.txt" << " Taken FAILED: " << L("path") << "\n"; } } ChildCount(L("con")) { L("count") = 0; L("child") = down(L("con")); while (L("child")) { L("count")++; L("child") = next(L("child")); } return L("count"); } ############################################### # KBB DISPLAY FUNCTIONS ############################################### DisplayKB(L("top con"),L("full")) { L("file") = DisplayFileName(); DisplayKBRecurse(L("file"),L("top con"),0,L("full")); L("file") << "\n"; return L("top con"); } KBHeader(L("text")) { L("file") = DisplayFileName(); L("file") << "#######################\n"; L("file") << "# " << L("text") << "\n"; L("file") << "#######################\n\n"; } DisplayFileName() { if (num(G("$passnum")) < 10) { L("file") = "ana00" + str(G("$passnum")); }else if (num(G("$passnum")) < 100) { L("file") = "ana0" + str(G("$passnum")); } else { L("file") = "ana" + str(G("$passnum")); } L("file") = L("file") + ".kbb"; return L("file"); } DisplayKBRecurse(L("file"),L("con"),L("level"),L("full")) { while (L("con")) { L("file") << SpacesStr(L("level")+1) << conceptname(L("con")); DisplayAttributes(L("file"),L("con"),L("full"),L("level")); L("file") << "\n"; if (down(L("con"))) { L("lev") = 1; DisplayKBRecurse(L("file"),down(L("con")),L("level")+L("lev"),L("full")); } if (L("level") == 0) return 0; L("con") = next(L("con")); } } DisplayAttributes(L("file"),L("con"),L("full"),L("level")) { L("attrs") = findattrs(L("con")); if (L("attrs")) L("file") << ": "; if (L("full") && L("attrs")) L("file") << "\n"; L("first attr") = 1; while (L("attrs")) { L("vals") = attrvals(L("attrs")); if (!L("full") && !L("first attr")) { L("file") << ", "; } if (L("full")) { if (!L("first attr")) L("file") << "\n"; L("file") << SpacesStr(L("level")+2); } L("file") << attrname(L("attrs")) << "=["; L("first") = 1; while (L("vals")) { if (!L("first")) L("file") << ","; L("val") = getstrval(L("vals")); L("num") = getnumval(L("vals")); L("con") = getconval(L("vals")); if (L("con")) { L("file") << conceptpath(L("con")); } else if (!L("full") && strlength(L("val")) > 20) { L("shorty") = strpiece(L("val"),0,20); L("file") << L("shorty"); L("file") << "..."; if (strendswith(L("val"),"\"")) L("file") << "\""; } else if (L("num") > -1) { L("file") << str(L("num")); } else { L("file") << L("val"); } L("first") = 0; L("vals") = nextval(L("vals")); } L("file") << "]"; L("first attr") = 0; L("attrs") = nextattr(L("attrs")); } } # Because NLP++ doesn't allow for empty strings, # this function can only be called with "num" >= 1 SpacesStr(L("num")) { L("n") = 1; L("spaces") = " "; while (L("n") < L("num")) { L("spaces") = L("spaces") + " "; L("n")++; } return L("spaces"); } ############################################### # DICTIONARY FUNCTIONS ############################################### DictionaryClear() { G("dictionary path") = G("$apppath") + "\\kb\\user\\dictionary.kb"; G("dictionary") = openfile(G("dictionary path")); G("dictionary words") = G("$apppath") + "\\kb\\user\\wordsdict.kb"; G("words") = openfile(G("dictionary words")); } DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) { L("file") = G("dictionary"); if (!dictfindword(L("word"))) L("words") << "add word \"" + L("word") + "\"\n"; L("file") << "ind 
attr\n" << findwordpath(L("word")) << "\n0\n"; L("file") << findwordpath(L("attrName")) << "\n"; if (L("attrType") == "str") L("file") << "pst\n" << L("value"); else if (L("attrType") == "num") L("file") << "pnum\n" << str(L("value")); else if (L("attrType") == "con") L("file") << "pcon\n" << conceptpath(L("value")); L("file") << "\nend ind\n\n"; } DictionaryEnd() { G("dictionary") << "\nquit\n\n"; closefile(G("dictionary")); "dict.txt" << "got here" << "\n"; } @@DECL
@NODES _LINE @RULES _word <- _xWILD [match=(_xALPHA _xNUM \.)] @@
# Rename every node that matched the current element to NAME @RULES _locfield <- location \: _xWILD [rename=_location] \n @@
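# A follow-on sketch (hypothetical later pass, names illustrative): because the matched
# wild nodes were renamed to _location, a subsequent pass can match that label directly.
@NODES _locfield

@POST
	S("place") = N("$text",1);
	single();
@RULES
_place <- _location @@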
@NODES _LINE @POST X("indent") = 0; @RULES _indent <- _xSTART ### (1) \" ### (2) @@ @POST singler(2,2); @RULES _indent <- _xSTART ### (1) _xWILD [match=(_xWHITE)] ### (2) \" ### (3) @@
# Replace named attribute's value(s) with num. replaceval(L("con"), L("name"), L("num"));
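# A minimal sketch of replaceval() (the concept and attribute names are illustrative):
# whatever values the "count" attribute held are replaced by the single number 5.
@CODE
L("con") = makeconcept(findroot(),"tally");
addnumval(L("con"),"count",1);
replaceval(L("con"),"count",5);
"output.txt" << str(numval(L("con"),"count")) << "\n";	# prints 5
rmconcept(L("con"));
@@CODE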
@CODE L("hello") = 0; @@CODE @NODES _clause # there vg @POST if (pnname(N(2)) == "_np") L("n") = pndown(N(2)); else L("n") = N(2); chpos(L("n"),"EX"); # there/EX [DEFAULT] if (pnname(N(4)) == "_vg") if (!N("voice",4)) X("voice") = N("voice",4) = "active"; @RULES _xNIL <- _xWILD [star match=(_advl _adv)] there [s] _xWILD [star match=(_advl _adv)] _vg @@ # there @CHECK if (N("there")) fail(); # Loop guard. @POST if (pnname(N(1)) == "_np") L("n") = pndown(N(1)); else L("n") = N(1); if (!pnvar(L("n"),"mypos")) chpos(L("n"),"RB"); # there/RB [DEFAULT] N("there") = 1; @RULES _xNIL <- there [s] @@ # up # down # out # Not otherwise assigned. @CHECK if (N("mypos",1)) fail(); @POST chpos(N(1),"RB"); @RULES _xNIL <- _xWILD [s one match=(up down out)] _xWILD [star match=(_adv _advl)] _xEND @@ # vg advl # vg by-actor @PRE <1,1> varne("voice","active"); <2,2> var("by-actor"); <2,2> varne("sem","date"); @POST fixvg(N(1),"passive","VBN"); @RULES _xNIL <- _vg _advl @@
@NODES _LINE @POST singler(1,1); @RULES _countryText <- _xWILD [fail=(_codes)] _codes ### (1) @@
@PATH _ROOT _experienceZone _experienceInstance _LINE # If date range wasn't picked up, try single date. # @CHECK if (X("date range",3)) fail(); @POST X("date range",3) = N("$text",1); @RULES _xNIL <- _SingleDate [s] @@ # If city state hasn't been picked up yet and there's a cap turd, # pick it up. @CHECK if ( !X("city",3) && !X("state",3) && !N("capofcap",1) && !N("capandcap",1) && N("len",1) <= 2 ) succeed(); fail(); @POST S("city") = N("$text",1); S("state") = N("$text",4); X("city",3) = N("$text",1); X("state",3) = N("$text",4); single(); @RULES _cityState <- _Caps [s rename=(_city) layer=(_cityName)] \, [s] _xWHITE [s star] _state [s layer=(_stateName)] @@ # If company name not found in instance, take a closer look around. # Pick up unhandled crud on the anchor line!! @CHECK if ( !X("company name",3) && (X("lineno") == X("anchor lineno",3)) && (N("company conf") >= 40 ||N("humanname conf") >= 40) ) succeed(); fail(); @POST X("company name",3) = N("$text",1); @RULES _xNIL <- _Caps [rename=(_company)] @@ # Company name is more reasonable than human name in experience # zone's anchor line. @CHECK if ( !X("company name",3) && X("lineno") == X("anchor lineno",3) ) succeed(); fail(); @POST X("company name",3) = N("$text",1); @RULES _xNIL <- _humanName [rename=(_company)] @@ # Company name still not found. # Check one line away from anchor, as last gasp. @CHECK if ( !X("company name",3) && (N("tmp") = (X("lineno") - X("anchor lineno",3))) && (N("tmp") >= -1 && N("tmp") <= 1) # one line away fm anchor. && (N("company conf") >= 40 || N("humanname conf") >= 40) ) succeed(); fail(); @POST X("company name",3) = N("$text",1); @RULES _xNIL <- _Caps [rename=(_company)] @@
# THIS COMES FROM: # https://conjugator.reverso.net/conjugation-portuguese-verb-ser.html @CODE L("nothing") = 0; @@CODE
@PATH _ROOT _derivedTerms _headerZone _LINE @POST addstrval(N("con",1),"derivedTerms",N("$treetext",1)); @RULES _xNIL <- _derivedTerm ### (1) @@
@DECL DictDisplayAttr(L("con"),L("attr")) { L("vals") = AttrValues(L("con"),L("attr")); L("str") = L("attr") + "="; L("count") = ValCount(L("vals")); if (L("count") > 1) L("str") = L("str") + "["; L("first") = 1; while (L("vals")) { L("val") = getstrval(L("vals")); if (!L("first")) L("str") = L("str") + ","; L("str") = L("str") + QuoteIfNeeded(L("val")); L("vals") = nextval(L("vals")); L("first") = 0; } if (L("count") > 1) L("str") = L("str") + "]"; return L("str"); } @@DECL
@NODES _LINE @POST excise(1,1); noop(); @RULES _xNIL <- _xWHITE [s] ### (1) @@
@NODES _ROOT @POST N("thisone",3) = 1; @RULES _xNIL <- this [s] ### (1) one ### (2) _xALPHA ### (3) @@ @POST S("thisone") = 1; single(); @RULES _this <- that [s] ### (1) one ### (2) _xALPHA ### (3) @@ @POST singler(2,3); @RULES _two <- which ### (1) one ### (2) _xALPHA ### (3) @@ @POST L("underscore") = "_"+N("$text",2); L("node") = group(1,2,L("underscore")); pnmakevar(L("node"),"header",1); @RULES _xNIL <- _xWILD [plus match=(\=)] ### (1) _xALPHA ### (2) @@
# Find the numth node in phrase. L("return_con") = findnode(L("phrase"), L("num"));
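# A minimal sketch, assuming L("phrase") was set earlier in the same @CODE region to a
# phrase obtained elsewhere (the variable name is illustrative): fetch its second node,
# if there is one.
@CODE
if (L("phrase"))
	{
	L("second") = findnode(L("phrase"),2);
	if (L("second"))
		"output.txt" << "found the 2nd node" << "\n";
	}
@@CODE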
@NODES _LINE @PRE <1,1> cap(); @RULES # Ex: Certificates _OtherHeaderWord [layer=(_headerWord )] <- _xWILD [min=1 max=1 s match=("Certificates" "Patents" "Affiliations")] @@
@CODE DictionaryStart(); @@CODE
# Fetch concept, given the path str. L("return_con") = pathconcept(L("str"));
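# A minimal round-trip sketch (concept name illustrative): get a concept's path string
# with conceptpath(), then fetch the same concept back with pathconcept().
@CODE
G("animals") = makeconcept(findroot(),"animals");
L("path str") = conceptpath(G("animals"));
L("return_con") = pathconcept(L("path str"));
if (conceptpath(L("return_con")) == L("path str"))
	"output.txt" << "round trip ok" << "\n";
rmconcept(G("animals"));
@@CODE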
@PATH _ROOT _doctypedecl _EntityDecl @POST S("public")=1 ; S("URI") = N("textValue",3) ; single() ; @@POST @RULES _ExternalID <- _xWILD [min=1 max=1 matches=("PUBLIC")] ### (1) _whiteSpace [one] ### (2) _PubidLiteral [one] ### (3) _whiteSpace [one] ### (4) _PubidLiteral [one] ### (5) @@ _ExternalID <- _xWILD [one matches=("PUBLIC")] ### (1) _whiteSpace [one] ### (2) _PubidLiteral [one] ### (3) _whiteSpace [one] ### (4) _SystemLiteral [one] ### (5) @@ @@RULES @POST S("public")=0 ; S("URI") = N("textValue",3) ; single() ; @@POST @RULES _ExternalID <- _xWILD [min=1 max=1 matches=("SYSTEM")] ### (1) _whiteSpace [opt] ### (2) _PubidLiteral [one] ### (3) @@ _ExternalID <- _xWILD [min=1 max=1 matches=("SYSTEM")] ### (1) _whiteSpace [opt] ### (2) _SystemLiteral [one] ### (3) @@ @@RULES
@CODE #fileout("caps.txt") prlit("caps.txt", "INFO FOR CAPITALIZED PHRASES\n"); prlit("caps.txt", " (Experience zone only)\n"); prlit("caps.txt", "Formula: Job conf=TOTLEN+CAPLEN+JOBNUM*3+JOBEND*10\n"); prlit("caps.txt", "=======================================================\n"); prlit("caps.txt", " TOT CAP UNK JOB JOB CO CO JOB CO CAP\n"); prlit("caps.txt", " LEN LEN LEN NUM END NUM END CNF CNF PHRASE\n"); prlit("caps.txt", " ------------------------------------------------------\n"); @@CODE @PATH _ROOT _experienceZone _LINE @POST # For fun, fetch text into a variable. N("text",1) = N("$text",1); # Special $var fetches node text. "caps.txt" << " " << rightjustifynum(N("len",1),3) << " "; "caps.txt" << rightjustifynum(N("caplen",1),3) << " "; "caps.txt" << rightjustifynum(N("unknowns",1),3) << " "; "caps.txt" << rightjustifynum(N("jobtitleroots",1),3) << " "; "caps.txt" << rightjustifynum(N("end jobtitleroots",1),3) << " "; "caps.txt" << rightjustifynum(N("companyroots",1),3) << " "; "caps.txt" << rightjustifynum(N("end companyroots",1),3) << " "; "caps.txt" << rightjustifynum(N("job conf",1),3) << " "; "caps.txt" << rightjustifynum(N("company conf",1),3) << " "; "caps.txt" << N("text",1) << "\n"; #noop() @RULES _xNIL <- _Caps [s] @@
@CODE L("hello") = 0; @@CODE @NODES _sent # np alpha alpha dqan @CHECK if (!N("verb",3)) fail(); @POST L("tmp3") = N(3); L("tmp4") = N(4); group(3,3,"_verb"); pncopyvars(L("tmp3"),N(3)); L("v") = N(3); group(3,3,"_vg"); mhbv(N(3),L("neg"),0,0,0,0,L("v")); pncopyvars(L("tmp3"),N(3)); N("verb node",3) = L("v"); clearpos(N(3),1,0); fixvg(N(3),"active","VBP"); if (N("adj",4)) { group(4,4,"_adj"); pncopyvars(L("tmp4"),N(4)); fixadj(N(4)); } else if (N("noun",4)) { group(4,4,"_noun"); pncopyvars(L("tmp4"),N(4)); } else { group(4,4,"_adj"); pncopyvars(L("tmp4"),N(4)); fixadj(N(4)); } @RULES _xNIL <- _np _xWILD [star match=(_advl _adv)] _xALPHA _xALPHA _xWILD [one lookahead match=(_adj _noun)] @@ # dqan @CHECK S("last") = lasteltnode(4); if (N(2)) S("first") = N(2); else if (N(3)) S("first") = N(3); else if (N(4)) S("first") = N(4); if (!numbersagree(S("first"),S("last"))) fail(); @POST L("neg") = attrinrange("neg",S("first"),S("last")); if (L("neg")) S("neg") = L("neg"); L("tmp4") = lasteltnode(4); pncopyvars(L("tmp4")); sclearpos(1,0); singler(2,4); @RULES _np <- _xWILD [one match=(_xSTART _verb _vg _prep)] _xWILD [star match=(_det _quan _num _xNUM)] _adj [star] _noun [plus] _xWILD [one lookahead fail=(_noun _adj _xALPHA _aposS)] @@ _np <- _xWILD [one match=(_xSTART _verb _vg _prep)] _xWILD [star match=(_det _quan _num _xNUM)] _adj [star] _noun [plus] _xEND @@ # vg np prep dqan alpha @CHECK if (!N("noun",5)) fail(); @POST L("tmp5") = N(5); group(5,5,"_noun"); pncopyvars(L("tmp5"),N(5)); fixnoun(N(5)); @RULES _xNIL <- # [s] because it could be a clause start. _xWILD [s one match=(_vg _verb)] _np _prep _xWILD [plus match=(_det _pro _quan _num _xNUM _adj _noun)] _xALPHA _xWILD [one lookahead match=(_fnword _qEOS _xEND _adv _advl)] @@ # np alpha dqan @CHECK if (!N("verb",3)) fail(); @POST L("tmp3") = N(3); group(3,3,"_verb"); pncopyvars(L("tmp3"),N(3)); L("v") = N(3); group(3,3,"_vg"); mhbv(N(3),L("neg"),0,0,0,0,L("v")); N("verb node",3) = L("v"); fixvg(N(3),"active","VBP"); clearpos(N(3),1,0); @RULES _xNIL <- _np _xWILD [star match=(_advl _adv)] _xALPHA _xWILD [one lookahead match=(_det _quan _num _xNUM _adj _noun _np)] @@ # Looks too broad and old. # # vg dqan alpha @CHECK if (!N("noun",6)) fail(); if (!N(2) && !N(3) && !N(4) && !N(5) && !N(6)) fail(); if (N("stem",7) == "to") fail(); @POST L("tmp6") = N(6); groupone(N(6),6,"_noun",1); fixnoun(N(6)); dqaninfo(2,3,4,5); S("olast") = 6; S("last") = S("lastn") = S("lastan") = N(6); if (!N(4) && !N(5)) S("firstan") = S("firstn") = N(6); if (!N(5)) S("firstn") = N(6); groupnp(); @RULES _xNIL <- _vg _det [star] _xWILD [star match=(_quan _num _xNUM)] _adj [star] _noun [star] _xALPHA _xWILD [one match=(_advl _adv _clausesep _prep)] @@ # prep np @POST pncopyvars(3); S("prep") = N(1); if (strtolower(N("$text",1)) == "by") { S("by-np") = 1; if (N("sem",3) != "date" && N("sem",3) != "geoloc") S("by-actor") = 1; # 04/21/07 AM. } sclearpos(1,0); singler(1,3); @RULES _advl <- _xWILD [one match=(_prep)] _adv [star] _xWILD [s one match=(_np) except=(_proSubj)] _xWILD [one lookahead fail=(_conj)] @@ # Clausal stuff... # vg np advl vg @CHECK if (N("pos400 vnv",4)) fail(); if (N("voice",4)) fail(); L("start") = N(3); L("end") = lasteltnode(3); if (!pnnameinrange("_advl",L("start"),L("end"))) fail(); @POST N("pos400 vnv",4) = 1; # Loop guard. fixvg(N(4),"passive","VBN"); @RULES _xNIL <- _vg _np _xWILD [plus match=(_advl _adv)] _vg @@ # complex nested clause. # vg vg np adj # ex: This makes going to the store difficult. 
@CHECK S("stem") = nodestem(N(1)); "zub.txt" << "stem=" << S("stem") << "\n"; if (!finddictattr(S("stem"),"X7")) # keep it green ... fail(); "zub.txt" << "here" << "\n"; if (!vconjq(N(2),"-ing")) fail(); "zub.txt" << "here" << "\n"; @POST group(4,4,"_clausesep"); group(2,2,"_clausestart"); @RULES _xNIL <- _vg _vg _np _xWILD [one match=(_adj _adjc)] @@
@NODES _LINE @RULES _date <- _xNUM ### (1) \/ ### (2) _xNUM ### (3) @@ @PRE <1,1> var("month"); <2,2> var("year"); @RULES _newNode <- _xALPHA ### (1) _xNUM ### (2) to ### (3) present ### (4) @@
@DECL ############################################### # General functions ############################################### AddUniqueCon(L("concept"),L("name")) { L("con") = findconcept(L("concept"),L("name")); if (!L("con")) L("con") = makeconcept(L("concept"),L("name")); return L("con"); } AddUniqueStr(L("concept"),L("attr"),L("value")) { if (L("value")) { L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("str") = getstrval(L("val")); if (L("str") == L("value")) return 0; L("val") = nextval(L("val")); } addstrval(L("concept"),L("attr"),L("value")); return 1; } return 0; } AddUniqueNum(L("concept"),L("attr"),L("value")) { if (L("value")) { L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("num") = getnumval(L("val")); if (L("num") == L("value")) return 0; L("val") = nextval(L("val")); } addnumval(L("concept"),L("attr"),L("value")); return 1; } return 0; } AddUniqueConVal(L("concept"),L("attr"),L("value")) { "unique.txt" << L("attr") << " " << conceptpath(L("concept")) << " ==> " << L("attr") << " -- " << conceptpath(L("value")) << "\n"; L("val") = AttrValues(L("concept"),L("attr")); while (L("val")) { L("con") = getconval(L("val")); "unique.txt" << conceptname(L("con")) << "\n"; if (conceptpath(L("con")) == conceptpath(L("value"))) return 0; L("val") = nextval(L("val")); } addconval(L("concept"),L("attr"),L("value")); return 1; } PathToConcept(L("parent"),L("hier")) { L("cons") = split(L("hier")," "); L("i") = 0; L("con") = L("parent"); while (L("cons")[L("i")]) { L("c") = L("cons")[L("i")]; L("name") = strsubst(L("c"),"\"",0); if (L("name") != "concept") L("con") = AddUniqueCon(L("con"),L("name")); L("i")++; } return L("con"); } CopyAttr(L("from"),L("to"),L("attr")) { L("from value") = strval(L("from"),L("attr")); if (L("from value")) { L("to value") = strval(L("to"),L("attr")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr"),L("from value")); } } CopyAttrNew(L("from"),L("to"),L("attr from"),L("attr to")) { L("from value") = strval(L("from"),L("attr from")); if (L("from value")) { L("to value") = strval(L("to"),L("attr to")); if (L("from value") && !L("to value")) addstrval(L("to"),L("attr to"),L("from value")); } } CopyConAttr(L("from"),L("to"),L("attr")) { L("from value") = conval(L("from"),L("attr")); if (L("from value")) { L("to value") = conval(L("to"),L("attr")); if (L("from value") && !L("to value")) addconval(L("to"),L("attr"),L("from value")); } } AttrValues(L("con"),L("attr")) { L("at") = findattr(L("con"),L("attr")); if (L("at")) return attrvals(L("at")); return 0; } ValCount(L("vals")) { while (L("vals")) { L("count")++; L("vals") = nextval(L("vals")); } return L("count"); } LastChild(L("parent")) { L("child") = down(L("parent")); while (L("child")) { L("last") = L("child"); L("child") = next(L("child")); } return L("last"); } MakeCountCon(L("con"),L("count name")) { L("count name") = CountName(L("con"),L("count name")); return makeconcept(L("con"),L("count name")); } IncrementCount(L("con"),L("countname")) { L("count") = numval(L("con"),L("countname")); if (L("count")) { L("count") = L("count") + 1; replaceval(L("con"),L("countname"),L("count")); } else { addnumval(L("con"),L("countname"),1); L("count") = 1; } return L("count"); } CountName(L("con"),L("root")) { L("count") = IncrementCount(L("con"),L("root")); return L("root") + str(L("count")); } StripEndDigits(L("name")) { if (strisdigit(L("name"))) return 0; L("len") = strlength(L("name")) - 1; L("i") = L("len") - 1; L("str") = strpiece(L("name"),L("i"),L("len")); while 
(strisdigit(L("str")) && L("i")) { L("i")--; L("str") = strpiece(L("name"),L("i"),L("len")); } return strpiece(L("name"),0,L("i")); } ############################################### # KB Dump Functins ############################################### DumpKB(L("con"),L("file")) { L("dir") = G("$apppath") + "/kb/"; L("filename") = L("dir") + L("file") + ".kb"; if (!kbdumptree(L("con"),L("filename"))) { "kb.txt" << "FAILED dump: " << L("filename") << "\n"; } else { "kb.txt" << "DUMPED: " << L("filename") << "\n"; } } TakeKB(L("filename")) { L("path") = G("$apppath") + "/kb/" + L("filename") + ".kb"; "kb.txt" << "Taking: " << L("path") << "\n"; if (take(L("path"))) { "kb.txt" << " Taken successfully: " << L("path") << "\n"; } else { "kb.txt" << " Taken FAILED: " << L("path") << "\n"; } } ChildCount(L("con")) { L("count") = 0; L("child") = down(L("con")); while (L("child")) { L("count")++; L("child") = next(L("child")); } return L("count"); } ############################################### # KBB DISPLAY FUNCTIONS ############################################### ############################################### # display type: # 0 compact with ellipses on long attr values # 1 full, more spread out # 2 compact without ellipses on long attr values ############################################### DisplayKB(L("top con"),L("display type")) { L("file") = DisplayFileName(); DisplayKBRecurse(L("file"),L("top con"),0,L("display type")); L("file") << "\n"; return L("top con"); } KBHeader(L("text")) { L("file") = DisplayFileName(); L("file") << "#######################\n"; L("file") << "# " << L("text") << "\n"; L("file") << "#######################\n\n"; } DisplayFileName() { if (num(G("$passnum")) < 10) { L("file") = "ana00" + str(G("$passnum")); }else if (num(G("$passnum")) < 100) { L("file") = "ana0" + str(G("$passnum")); } else { L("file") = "ana" + str(G("$passnum")); } L("file") = L("file") + ".kbb"; return L("file"); } DisplayKBRecurse(L("file"),L("parent"),L("level"),L("display type")) { if (L("level") == 0) { L("file") << conceptname(L("parent")) << "\n"; } L("con") = down(L("parent")); while (L("con")) { L("file") << SpacesStr(L("level")+1) << conceptname(L("con")); DisplayAttributes(L("file"),L("con"),L("display type"),L("level")); L("file") << "\n"; if (down(L("con"))) { L("lev") = 1; DisplayKBRecurse(L("file"),L("con"),L("level")+L("lev"),L("display type")); } L("con") = next(L("con")); } } DisplayAttributes(L("file"),L("con"),L("display type"),L("level")) { L("attrs") = findattrs(L("con")); if (L("attrs")) L("file") << ": "; if (L("display type") == 1 && L("attrs")) L("file") << "\n"; L("first attr") = 1; while (L("attrs")) { L("vals") = attrvals(L("attrs")); L("count") = ValCount(L("vals")); if (L("display type") != 1 && !L("first attr")) { L("file") << ", "; } if (L("display type") == 1) { if (!L("first attr")) L("file") << "\n"; L("file") << SpacesStr(L("level")+2); } L("file") << attrname(L("attrs")) << "="; L("first") = 1; while (L("vals")) { L("val") = getstrval(L("vals")); L("num") = getnumval(L("vals")); L("con") = getconval(L("vals")); if (!L("first")) L("file") << ","; else if (L("count") > 1 && !L("con")) L("file") << "["; if (L("con")) { if (L("first")) L("file") << "["; L("file") << conceptpath(L("con")); } else if (L("display type") == 0 && strlength(L("val")) > 20) { L("shorty") = strpiece(L("val"),0,20); L("file") << L("shorty"); L("file") << "..."; if (strendswith(L("val"),"\"")) L("file") << "\""; } else if (L("num") > -1) { L("file") << str(L("num")); } else { if 
(DisplayValNeedsQuote(L("val"))) L("file") << "\""; L("file") << L("val"); if (DisplayValNeedsQuote(L("val"))) L("file") << "\""; } L("first") = 0; L("vals") = nextval(L("vals")); } if (L("con") || L("count") > 1) L("file") << "]"; L("first attr") = 0; L("attrs") = nextattr(L("attrs")); } } DisplayValNeedsQuote(L("str")) { if (strcontains(" ",L("str")) || strcontains("[",L("str")) || strcontains("]",L("str"))) return 1; return 0; } # Because NLP++ doesn't allow for empty strings, # this function can only be called with "num" >= 1 SpacesStr(L("num")) { L("n") = 1; L("spaces") = " "; while (L("n") < L("num")) { L("spaces") = L("spaces") + " "; L("n")++; } return L("spaces"); } PadStr(L("num str"),L("pad str"),L("pad len")) { L("len") = strlength(L("num str")); L("pad") = 0; L("to pad") = L("pad len") - L("len"); while (L("i")++ < L("to pad")) { L("pad") = L("pad") + L("pad str"); } L("padded") = L("pad") + L("num str"); return L("padded"); } ############################################### # DICTIONARY FUNCTIONS ############################################### DictionaryStart() { G("attrs path") = G("$apppath") + "\\kb\\user\\attrs.kb"; G("attrs") = openfile(G("attrs path")); } DictionaryWord(L("word"),L("attrName"),L("value"),L("attrType")) { addword(L("word")); addword(L("attrName")); G("attrs") << "ind attr\n" << findwordpath(L("word")) << "\n0\n"; G("attrs") << findwordpath(L("attrName")) << "\n"; if (L("attrType") == "str") G("attrs") << "pst\n" << "\"" << L("value") << "\""; else if (L("attrType") == "num") G("attrs") << "pnum\n" << str(L("value")); else if (L("attrType") == "con") G("attrs") << "pcon\n" << conceptpath(L("value")); G("attrs") << "\nend ind\n\n"; } DictionaryEnd() { G("attrs") << "\nquit\n\n"; closefile(G("attrs")); } @@DECL
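# A minimal usage sketch (not from the original) for the utility functions declared above, assuming
# they are loaded in the same analyzer; the concept names "inventory" and "book" and the attribute
# names used here are illustrative only.
@CODE
G("inventory") = AddUniqueCon(findroot(),"inventory");   # created once, reused on later calls
L("item") = AddUniqueCon(G("inventory"),"book");
AddUniqueStr(L("item"),"color","red");
AddUniqueStr(L("item"),"color","red");                   # duplicate value, so this call adds nothing
IncrementCount(L("item"),"seen");                        # numeric "seen" attribute becomes 1
DisplayKB(G("inventory"),1);                             # dumps the subtree to the pass's .kbb file
@@CODE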
@NODES _ROOT @POST N("text") = N("$treetext"); @RULES _xNIL <- _td ### (1) @@
@NODES _td @RULES _tag <- \< ### (1) _xWILD [fail=(\>)] ### (2) \> @@
# create a concept like noun and assign it as the value of a word's attribute (the assignment step is sketched below). First, create the concepts named words and noun as children of the root of the KB (concept), and then make the concept book a child of words G("words") = makeconcept(findroot(), "words"); G("noun") = makeconcept(findroot(),"noun"); G("noun_book") = makeconcept(G("words"),"book");
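# A possible continuation (not in the original) that finishes the assignment the comment above
# describes, assuming the goal is to point the word concept book at the part-of-speech concept
# noun; the attribute name "pos" is illustrative.
addconval(G("noun_book"),"pos",G("noun"));
"output.txt" << conceptname(getconval(findvals(G("noun_book"),"pos"))) << "\n";   # prints: noun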
@NODES _ROOT @PRE <1,1> var("word"); <1,1> var("female"); <1,1> var("male"); @POST "both.txt" << N("word") << " " << N("example") << "\n"; @RULES _xNIL <- _LINE ### (1) @@
@NODES _sentence @PRE <1,1> cap(); <2,2> cap(); @RULES # Ex: MD\_Files _company <- MD [s] Files [s] @@ @PRE <1,1> cap(); <2,2> cap(); <3,3> cap(); @RULES # Ex: New\_Hampshire\_Medical _company <- New [s] Hampshire [s] Medical [s] @@ @PRE <1,1> cap(); <2,2> cap(); @RULES # Ex: Carezani\_Med _company <- Carezani [s] Med [s] @@ @PRE <1,1> cap(); <1,1> length(3); <2,2> cap(); <2,2> length(9); <3,3> cap(); <3,3> length(7); @RULES # Ex: New\_Hampshire\_Medical _company <- New [s] Hampshire [s] Medical [s] @@
# Fetch the number from a numeric value @CODE # create a range G("range") = makeconcept(findroot(), "range"); addnumval(G("range"),"min",33); addnumval(G("range"),"max",118); # access data "output.txt" << "range = " <<   getnumval(findvals(G("range"), "max")) -   getnumval(findvals(G("range"), "min")) << "\n"; # clean up rmconcept(G("range"));
@PATH _ROOT _LINE _list @POST excise(1,1); @RULES _xNIL <- _curly ### (1) @@
@PATH _ROOT _row @RULES _header <- _thStart ### (1) _xWILD [fail=(_thEnd)] ### (2) _thEnd ### (3) @@ @RULES _column <- _tdStart ### (1) _xWILD [fail=(_tdEnd)] ### (2) _tdEnd ### (3) @@
@PATH _ROOT _attr _value _LINE @POST X("value",2) = N("word",1); @RULES _xNIL <- _string _xEND @@
@NODES _ROOT @POST G("record count")++; S("record name") = "record" + str(G("record count")); S("record con") = makeconcept(G("records con"),S("record name")); single(); @RULES _RECORD <- _xWILD [plus fails=(_lineTerminator) except=(_FIELDS)] ### (1) _xWILD [one match=(_lineTerminator _xEND)] ### (2) @@
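# A hedged follow-on sketch (not part of the original pass), assuming a later pass visits each
# _RECORD node and that the _FIELDS nodes inside it carry the text worth storing on the record's
# concept created above.
@NODES _RECORD
@POST
  addstrval(X("record con"),"field",N("$text",1));
@RULES
_xNIL <- _FIELDS ### (1)
@@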
# Remove dictionary concept wordString from the KB dictionary hierarchy @CODE "output.txt" << "1 " << conceptname(addword("hello")) << "\n"; "output.txt" << "2 " << conceptname(wordindex("hello")) << "\n"; "output.txt" << "3 " << findwordpath("hello") << "\n"; "output.txt" << "4 " << findwordpath("olleh") << "\n"; "output.txt" << "5 " << wordpath("foobaz") << "\n"; "output.txt" << "6 " << conceptname(dictfindword("hello")) << "\n"; rmword("foobaz"); Prints out: 1 hello 2 he 3 "concept" "sys" "dict" "a" "h" "he" "hello" 4 5 "concept" "sys" "dict" "a" "f" "fo" "foobaz" 6 hello
# Convert string to SQL format "output.txt" << sqllstr("hello'bye") << "\n"; Outputs: hello''bye
@CODE DisplayKB(G("pred_codes"),1); sumRanks(down(G("pred_diagnoses"))); sumRanks(down(G("pred_procedures"))); convertToInt(down(G("pred_diagnoses")), 100); convertToInt(down(G("pred_procedures")), 100); G("procedure_pred_list"); G("diagnosis_pred_list"); L("proc_code_cons") = getChildCons(G("pred_procedures"), 0); L("diag_code_cons") = getChildCons(G("pred_diagnoses"), 0); L("top_diag_codes") = sortconsbyattr(L("diag_code_cons"), "rank", 1, 1); L("top_proc_codes") = sortconsbyattr(L("proc_code_cons"), "rank", 1, 1); # L("diag_preds") = conListToStrList(L("top_diag_codes")); # L("proc_preds") = conListToStrList(L("top_proc_codes")); L("f") = G("$inputhead") + "_preds.json"; # printJSONPreds(L("proc_preds"), L("diag_preds"), L("f"), 1); printJSONPreds(L("top_proc_codes"), L("top_diag_codes"), L("f"), 1); @@CODE
# Move a concept childConcept before previous sibling (Moves the concept to the 'left' or 'higher' in the pecking order.) @CODE "output.txt" << "move\n"; G("alpha") = makeconcept(findroot(),"first"); G("beta") = makeconcept(findroot(),"second"); G("gamma") = makeconcept(findroot(),"third"); movecleft(G("gamma")); movecright(G("alpha"));
@NODES _ROOT @POST S("section_title") = N("$text", 1); excise(4,4); excise(1,2); single(); @RULES _section <- _xWILD [fails=(\: _break)] ### (1) \: ### (2) _xWILD [fails=(_break _xEND)] ### (3) _xWILD [one matches=(_break _xEND)] ### (4) @@ @RULES _looseText <- _xWILD [fails=(_section _break)] @@ @POST excise(1,1); @RULES _xNIL <- _break @@
# Reduce the _det and _noun to an _np and REMOVE the node that matched element 3 from the parse tree @POST singlex(1,2); @RULES _np <- _det _noun _xWILD [one fail=(_noun)] @@
@PATH _ROOT _LINE @CHECK if ( (N("hi class",1) == N("hi class",5)) && ((N("hi conf",1) >= G("threshold")) || (N("hi conf",5) >= G("threshold"))) ) succeed(); fail(); @POST # Want a merge variant that keeps everything as is between the # two nodes. Could add a boolean to it. S("hi class") = N("hi class",1); S("hi conf") = N("hi conf",1) %% N("hi conf",5); # Combine conf! # Combine the count variables. S("fields") = N("fields",1) + N("fields",5); # and so on.....need a compose() action to do all this! S("company conf") = N("company conf",1) %% N("company conf",5); S("job conf") = N("job conf",1) %% N("job conf",5); S("field conf") = N("field conf",1) %% N("field conf",5); S("humanname conf") = N("humanname conf",1) %% N("humanname conf",5); merge(); @RULES _Caps [unsealed] <- _Caps _xWHITE [s star] _xWILD [s one match=( and \/ \& )] _xWHITE [s star] _Caps @@ # If there's a clash in types, there are several cases. # Parens because we've glommed the of stuff first, rightfully or not. # A and (B of C) # (A of B) and C # (A of B) and (C of D) @CHECK if ( N("capofcap",1) || N("capofcap",5) ) succeed(); fail(); @POST # Let the left side dominate. Copy everything. S("capandcap") = 1; # Flag what this is. # 12/30/99 AM. # Should transfer from 1st caps (dominant) to new list. S("hi class") = N("hi class",1); S("hi conf") = N("hi conf",1); S("ambigs") = N("ambigs"); # etc. S("humanname conf") = N("humanname conf",1); S("company conf") = N("company conf",1); S("field conf") = N("field conf",1); S("job conf") = N("job conf",1); S("school conf") = N("school conf",1); single(); @RULES _Caps [unsealed] <- _Caps _xWHITE [s star] _xWILD [s one match=( and )] _xWHITE [s star] _Caps @@ # A and B # If B is solid, use it. @POST S("capandcap") = 1; # Flag what this is. # 12/30/99 AM. if (N("hi conf",1) > N("hi conf",5)) { # Should transfer from 1st caps (dominant) to new list. S("hi class") = N("hi class",1); S("hi conf") = N("hi conf",1); S("ambigs") = N("ambigs"); # etc. S("humanname conf") = N("humanname conf",1); S("company conf") = N("company conf",1); S("field conf") = N("field conf",1); S("job conf") = N("job conf",1); S("school conf") = N("school conf",1); } else { S("hi class") = N("hi class",5); S("hi conf") = N("hi conf",5); S("ambigs") = N("ambigs"); # etc. S("humanname conf") = N("humanname conf",5); S("company conf") = N("company conf",5); S("field conf") = N("field conf",5); S("job conf") = N("job conf",5); S("school conf") = N("school conf",5); } single(); @RULES _Caps [unsealed] <- _Caps _xWHITE [s star] _xWILD [s one match=( and )] _xWHITE [s star] _Caps @@
# @NODES _REZZONE @PATH _ROOT _contactZone # 10/19/99 AM. # @NODES _contactZone # # DON'T SEAL? @RULES _addressBlock <- _addressPart [tree] _xWILD [tree star match=(_addressPart _BLANKLINE _horizRule)] @@ # So using TREE instead of SINGLET. _addressBlock <- _addressPart [tree min=1 max=10] @@ # because it doesn't account for things like comma separators. # Would be good to have a rule type that lets a phrase repeat. # eg, _edBlock <- _edPart { \, _edPart } [] @@ # Also, it crosses line boundaries, which we would want to do with # some care. Could grab a bunch of schools into one. # Form I - Handle education info on one line -- good enough for now! #_educationBlock <- _educationPart [s min=1 max=10] @@
# This will match words such as junk, junks, junky, junkyard @PRE <1,1> regexp("junk*"); @POST group(1,1,"_junkword"); @RULES _xNIL <- _xALPHA @@
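# A small follow-on sketch (assumed, not from the original) showing one way a later pass could
# collect the _junkword nodes grouped above, assuming they end up directly under _ROOT; the
# output file name is illustrative.
@NODES _ROOT
@POST
  "junk.txt" << N("$text",1) << "\n";
@RULES
_xNIL <- _junkword ### (1)
@@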
@CODE DisplayKB(G("words"), 1); @@CODE
# Fetch the name of a knowledge base attribute G("myConcept") = makeconcept(findroot(),"a concept"); G("myAttr") = addattr(G("myConcept"),"an attribute"); "output.txt" << attrname(G("myAttr")) << "\n";
@CODE if (!G("pretagged")) exitpass(); @@CODE @MULTI _ROOT # Highlight mismatches to pretagged text. @POST noop(); @RULES _xNIL <- _poserr @@
@NODES _LINE @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: Senior\_Software\_Engineer _jobTitle [layer=(_Caps )] <- Senior [s] _xWHITE [star s] Software [s] _xWHITE [star s] Engineer [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Network\_Technician _jobTitle [layer=(_Caps )] <- Network [s] _xWHITE [star s] Technician [s] @@ @PRE <1,1> cap(); @RULES # Ex: Senior\_software\_engineer _jobTitle [layer=(_Caps )] <- Senior [s] _xWHITE [star s] software [s] _xWHITE [star s] engineer [s] @@ @RULES # Ex: software\_engineer _jobTitle [layer=(_Caps )] <- software [s] _xWHITE [star s] engineer [s] @@ @PRE <1,1> cap(); @RULES # Ex: Project\_manager _jobTitle [layer=(_Caps )] <- Project [s] _xWHITE [star s] manager [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: Sr\_Software\_Engineer _jobTitle [layer=(_Caps )] <- Sr [s] _xWHITE [star s] Software [s] _xWHITE [star s] Engineer [s] @@ @PRE <1,1> cap(); @RULES # Ex: Consultant _jobTitle [layer=(_Caps )] <- Consultant [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: Lead\_Analyst/Programmer _jobTitle [layer=(_Caps )] <- Lead [s] _xWHITE [star s] Analyst [s] \/ [s] Programmer [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Applications\_Programmer _jobTitle [layer=(_Caps )] <- Applications [s] _xWHITE [star s] Programmer [s] @@ # Ex: Research\_Assistant _jobTitle [layer=(_Caps )] <- Research [s] _xWHITE [star s] Assistant [s] @@ # Ex: Network\_Consultant _jobTitle [layer=(_Caps )] <- Network [s] _xWHITE [star s] Consultant [s] @@ @PRE <1,1> cap(); @RULES # Ex: Scientist _jobTitle [layer=(_Caps )] <- Scientist [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Scientist\_Trainee _jobTitle [layer=(_Caps )] <- Scientist [s] _xWHITE [star s] Trainee [s] @@ # Ex: Systems\_Analyst _jobTitle [layer=(_Caps )] <- Systems [s] _xWHITE [star s] Analyst [s] @@ @PRE <1,1> cap(); @RULES # Ex: Stockbroker _jobTitle [layer=(_Caps )] <- Stockbroker [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Technical\_Writer _jobTitle [layer=(_Caps )] <- Technical [s] _xWHITE [star s] Writer [s] @@ # Ex: Product\_Manager _jobTitle [layer=(_Caps )] <- Product [s] _xWHITE [star s] Manager [s] @@ # Ex: Program\_Manager _jobTitle [layer=(_Caps )] <- Program [s] _xWHITE [star s] Manager [s] @@ # Ex: Systems\_Engineer _jobTitle [layer=(_Caps )] <- Systems [s] _xWHITE [star s] Engineer [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: PC\_Support\_Specialist _jobTitle [layer=(_Caps )] <- PC [s] _xWHITE [star s] Support [s] _xWHITE [star s] Specialist [s] @@ @PRE <1,1> cap(); @RULES # Ex: Specialist _jobTitle [layer=(_Caps )] <- Specialist [s] @@ @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Lab\_Assistant _jobTitle [layer=(_Caps )] <- Lab [s] _xWHITE [star s] Assistant [s] @@ # Ex: Programmer\_Specialist _jobTitle [layer=(_Caps )] <- Programmer [s] _xWHITE [star s] Specialist [s] @@ @PRE <1,1> cap(); <3,3> cap(); <5,5> cap(); @RULES # Ex: Staff\_Programmer\_Analyst _jobTitle [layer=(_Caps )] <- Staff [s] _xWHITE [star s] Programmer [s] _xWHITE [star s] Analyst [s] @@ # Ex: Information\_Resource\_Analyst _jobTitle [layer=(_Caps )] <- Information [s] _xWHITE [star s] Resource [s] _xWHITE [star s] Analyst [s] @@ @PRE <1,1> cap(); @RULES # Ex: System\_administrator _jobTitle [layer=(_Caps )] <- System [s] _xWHITE [star s] administrator [s] @@ @RULES # Ex: system\_administrator _jobTitle [layer=(_Caps )] <- system [s] _xWHITE [star s] administrator [s] @@ # Ex: programmer _jobTitle [layer=(_Caps )] <- programmer [s] @@ @PRE <1,1> 
cap(); @RULES # Ex: Teacher _jobTitle [layer=(_Caps )] <- Teacher [s] @@
@NODES _ROOT @POST "headerzone.txt" << N("text",1) << "\n\n"; S("header") = N("text",1); single(); @RULES _headerZone <- _header ### (1) _xWILD [plus fail=(_header _bottom)] ### (2) @@
# Rename concept aConcept with new name newConceptNameStr if(findconcept(findroot(),"apples")) rmconcept(findconcept(findroot(),"apples")); G("apples") = makeconcept(findroot(),"apples"); addstrval(G("apples"),"have","seeds"); renameconcept(G("apples"),"fruit"); "output.txt" << conceptname(G("apples")) << "\n";
@PATH _ROOT _educationZone _educationInstance _LINE @POST if (!X("city",3)) X("city",3) = N("$text"); # noop() @RULES _xNIL <- _city [s] @@
@NODES _humanNameCandidate @RULES _addressPart <- _humanName @@
# Reduce the _det and _noun to an _np, but not the node that matched element 3 @POST  singler(1,2); @RULES _np <- _det _noun _xWILD [one fail=(_noun)] @@
@CODE # Add all euis in eui_to_codes to P_euis, with count attr (a hedged completion sketch follows below) L("eui_iter") = G("eui_to_codes"); @@CODE
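# A hedged completion sketch (not from the original), assuming G("eui_to_codes") and G("P_euis")
# are concepts, each child of eui_to_codes represents one eui, and the AddUniqueCon/IncrementCount
# helpers from the earlier @DECL block are available.
@CODE
L("eui") = down(G("eui_to_codes"));
while (L("eui")) {
  L("copy") = AddUniqueCon(G("P_euis"),conceptname(L("eui")));
  IncrementCount(L("copy"),"count");
  L("eui") = next(L("eui"));
}
@@CODE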
@PATH _ROOT _LINE @POST excise(1,1); noop(); @RULES _xNIL <- _xWHITE [s] ### (1) @@