| /* |
| [The "BSD license"] |
| Copyright (c) 2005-2009 Gokulakannan Somasundaram, |
| |
| All rights reserved. |
| |
| Redistribution and use in source and binary forms, with or without |
| modification, are permitted provided that the following conditions |
| are met: |
| 1. Redistributions of source code must retain the above copyright |
| notice, this list of conditions and the following disclaimer. |
| 2. Redistributions in binary form must reproduce the above copyright |
| notice, this list of conditions and the following disclaimer in the |
| documentation and/or other materials provided with the distribution. |
| 3. The name of the author may not be used to endorse or promote products |
| derived from this software without specific prior written permission. |
| |
| THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| */ |
| |
| /* |
| * This code generating template and the associated Cpp runtime was produced by: |
| * Gokulakannan Somasundaram ( heavy lifting from C Run-time by Jim Idle ) |
| */ |
| cTypeInitMap ::= [ |
| "int" : "0", // Integers start out being 0 |
| "long" : "0", // Longs start out being 0 |
| "float" : "0.0", // Floats start out being 0 |
| "double" : "0.0", // Doubles start out being 0 |
| "bool" : "false", // Booleans start out being Antlr C for false |
| "byte" : "0", // Bytes start out being 0 |
| "short" : "0", // Shorts start out being 0 |
| "char" : "0" // Chars start out being 0 |
| ] |
| |
| leadIn(type) ::= |
| << |
| /** \file |
| * This <type> file was generated by $ANTLR version <ANTLRVersion> |
| * |
| * - From the grammar source file : <fileName> |
| * - On : <generatedTimestamp> |
| <if(LEXER)> |
| * - for the lexer : <name>Lexer |
| <endif> |
| <if(PARSER)> |
| * - for the parser : <name>Parser |
| <endif> |
| <if(TREE_PARSER)> |
| * - for the tree parser : <name>TreeParser |
| <endif> |
| * |
| * Editing it, at least manually, is not wise. |
| * |
| * C++ language generator and runtime by Gokulakannan Somasundaram ( heavy lifting from C Run-time by Jim Idle ) |
| * |
| * |
| >> |
| |
| /** The overall file structure of a recognizer; stores methods for rules |
| * and cyclic DFAs plus support code. |
| */ |
| outputFile( LEXER, |
| PARSER, |
| TREE_PARSER, |
| actionScope, |
| actions, |
| docComment, |
| recognizer, |
| name, |
| tokens, |
| tokenNames, |
| rules, |
| cyclicDFAs, |
| bitsets, |
| buildTemplate, |
| buildAST, |
| rewriteMode, |
| profile, |
| backtracking, |
| synpreds, |
| memoize, |
| numRules, |
| fileName, |
| ANTLRVersion, |
| generatedTimestamp, |
| trace, |
| scopes, |
| superClass, |
| literals |
| ) ::= |
| << |
| <leadIn("C++ source")> |
| */ |
| // [The "BSD license"] |
| // Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB |
| // |
| // All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions |
| // are met: |
| // 1. Redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer. |
| // 2. Redistributions in binary form must reproduce the above copyright |
| // notice, this list of conditions and the following disclaimer in the |
| // documentation and/or other materials provided with the distribution. |
| // 3. The name of the author may not be used to endorse or promote products |
| // derived from this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| // IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| // NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| <if(actions.(actionScope).header)> |
| |
| /* ============================================================================= |
| * This is what the grammar programmer asked us to put at the top of every file. |
| */ |
| <actions.(actionScope).header> |
| /* End of Header action. |
| * ============================================================================= |
| */ |
| <endif> |
| |
| /* ----------------------------------------- |
| * Include the ANTLR3 generated header file. |
| */ |
| #include "<name>.hpp" |
| <if(trace)> |
| #include \<iostream> |
| <endif> |
| <if(recognizer.grammar.delegators)> |
| // Include delegator definition header files |
| // |
| <recognizer.grammar.delegators: {g|#include "<g.recognizerName>.hpp" }; separator="\n"> |
| <endif> |
| |
| <actions.(actionScope).postinclude> |
| /* ----------------------------------------- */ |
| |
| <docComment> |
| |
| <if(literals)> |
| |
| <beginNamespace(actions)> |
| |
| /** String literals used by <name> that we must do things like MATCHS() with. |
| * C will normally just lay down 8 bit characters, and you can use L"xxx" to |
| * get wchar_t, but wchar_t is 16 bits on Windows, which is not UTF32 and so |
| * we perform this little trick of defining the literals as arrays of UINT32 |
| * and passing in the address of these. |
| */ |
| <literals:{it | static ANTLR_UCHAR lit_<i>[] = <it>;}; separator="\n"> |
| |
| <endNamespace(actions)> |
| |
| <endif> |
| |
| /* ============================================================================= */ |
| |
| /* ============================================================================= |
| * Start of recognizer |
| */ |
| |
| <recognizer> |
| |
| /* End of code |
| * ============================================================================= |
| */ |
| |
| >> |
| headerFileExtension() ::= ".hpp" |
| |
| beginNamespace(actions) ::= <% |
| <if(actions.(actionScope).namespace)> |
| namespace <actions.(actionScope).namespace> { |
| <endif> |
| %> |
| |
| endNamespace(actions) ::= <% |
| <if(actions.(actionScope).namespace)> |
| } |
| <endif> |
| %> |
| |
| |
| headerFile( LEXER, |
| PARSER, |
| TREE_PARSER, |
| actionScope, |
| actions, |
| docComment, |
| recognizer, |
| name, |
| tokens, |
| tokenNames, |
| rules, |
| cyclicDFAs, |
| bitsets, |
| buildTemplate, |
| buildAST, |
| rewriteMode, |
| profile, |
| backtracking, |
| synpreds, |
| memoize, |
| numRules, |
| fileName, |
| ANTLRVersion, |
| generatedTimestamp, |
| trace, |
| scopes, |
| superClass, |
| literals |
| ) ::= |
| << |
| <leadIn("C++ header")> |
| <if(PARSER)> |
| * The parser <mainName()> has the callable functions (rules) shown below, |
| <endif> |
| <if(LEXER)> |
| * The lexer <mainName()> has the callable functions (rules) shown below, |
| <endif> |
| <if(TREE_PARSER)> |
| * The tree parser <mainName()> has the callable functions (rules) shown below, |
| <endif> |
| * which will invoke the code for the associated rule in the source grammar |
| * assuming that the input stream is pointing to a token/text stream that could begin |
| * this rule. |
| * |
| * For instance if you call the first (topmost) rule in a parser grammar, you will |
| * get the results of a full parse, but calling a rule half way through the grammar will |
| * allow you to pass part of a full token stream to the parser, such as for syntax checking |
| * in editors and so on. |
| * |
| */ |
| // [The "BSD license"] |
| // Copyright (c) 2005-2009 Gokulakannan Somasundaram. |
| // |
| // All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions |
| // are met: |
| // 1. Redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer. |
| // 2. Redistributions in binary form must reproduce the above copyright |
| // notice, this list of conditions and the following disclaimer in the |
| // documentation and/or other materials provided with the distribution. |
| // 3. The name of the author may not be used to endorse or promote products |
| // derived from this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| // IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| // NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| #ifndef _<name>_H |
| #define _<name>_H |
| <actions.(actionScope).preincludes> |
| /* ============================================================================= |
| * Standard antlr3 C++ runtime definitions |
| */ |
| #include \<antlr3.hpp> |
| |
| /* End of standard antlr 3 runtime definitions |
| * ============================================================================= |
| */ |
| |
| <actions.(actionScope).includes> |
| |
| <if(recognizer.grammar.delegates)> |
| // Include delegate definition header files |
| // |
| <recognizer.grammar.delegates: {g|#include "<g.recognizerName>.hpp"}; separator="\n"> |
| |
| <endif> |
| |
| |
| <actions.(actionScope).header> |
| |
| #ifdef WIN32 |
| // Disable: Unreferenced parameter, - Rules with parameters that are not used |
| // constant conditional, - ANTLR realizes that a prediction is always true (synpred usually) |
| // initialized but unused variable - tree rewrite variables declared but not needed |
| // Unreferenced local variable - lexer rule declares but does not always use _type |
| // potentially unitialized variable used - retval always returned from a rule |
| // unreferenced local function has been removed - susually getTokenNames or freeScope, they can go without warnigns |
| // |
| // These are only really displayed at warning level /W4 but that is the code ideal I am aiming at |
| // and the codegen must generate some of these warnings by necessity, apart from 4100, which is |
| // usually generated when a parser rule is given a parameter that it does not use. Mostly though |
| // this is a matter of orthogonality hence I disable that one. |
| // |
| #pragma warning( disable : 4100 ) |
| #pragma warning( disable : 4101 ) |
| #pragma warning( disable : 4127 ) |
| #pragma warning( disable : 4189 ) |
| #pragma warning( disable : 4505 ) |
| #pragma warning( disable : 4701 ) |
| #endif |
| <if(backtracking)> |
| |
| /* ======================== |
| * BACKTRACKING IS ENABLED |
| * ======================== |
| */ |
| <endif> |
| |
| <beginNamespace(actions)> |
| |
| <if(recognizer.grammar.delegators)> |
| // Include delegator definition classes |
| // |
| <recognizer.grammar.delegators: {g|class <g.recognizerName>; }; separator="\n"> |
| <endif> |
| |
| <actions.(actionScope).traits> |
| typedef <name>Traits <name>ImplTraits; |
| |
| <rules:{r | <if(r.ruleDescriptor.isSynPred)> struct <r.ruleDescriptor.name> {\}; <endif>}; separator="\n"> |
| |
| class <name>Tokens |
| { |
| public: |
| /** Symbolic definitions of all the tokens that the <grammarType()> will work with. |
| * |
| * Antlr will define EOF, but we can't use that as it it is too common in |
| * in C header files and that would be confusing. There is no way to filter this out at the moment |
| * so we just undef it here for now. That isn't the value we get back from C recognizers |
| * anyway. We are looking for ANTLR_TOKEN_EOF. |
| */ |
| enum Tokens |
| { |
| EOF_TOKEN = <name>ImplTraits::CommonTokenType::TOKEN_EOF |
| <tokens:{it | , <it.name> = <it.type> }; separator="\n"> |
| }; |
| |
| }; |
| |
| /** Context tracking structure for <mainName()> |
| */ |
| class <name> : public <componentBaseType()>, public <name>Tokens |
| { |
| public: |
| typedef <name>ImplTraits ImplTraits; |
| typedef <name> ComponentType; |
| typedef ComponentType::StreamType StreamType; |
| typedef <componentBaseType()> BaseType; |
| typedef ImplTraits::RecognizerSharedStateType\<StreamType> RecognizerSharedStateType; |
| typedef StreamType InputType; |
| <if(recognizer.filterMode)> |
| static const bool IsFiltered = true; |
| <else> |
| static const bool IsFiltered = false; |
| <endif> |
| |
| <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeDecl(it)><endif>}> |
| <rules:{r | <if(r.ruleDescriptor.ruleScope)><ruleAttributeScopeDecl(scope=r.ruleDescriptor.ruleScope)><endif>}> |
| |
| private: |
| <if(recognizer.grammar.delegates)> |
| <recognizer.grammar.delegates: |
| {g|<g.recognizerName>* m_<g:delegateName()>;}; separator="\n"> |
| <endif> |
| <if(recognizer.grammar.delegators)> |
| <recognizer.grammar.delegators: |
| {g|<g.recognizerName>* m_<g:delegateName()>;}; separator="\n"> |
| <endif> |
| <scopes:{it | <if(it.isDynamicGlobalScope)> |
| <globalAttributeScopeDef(it)> |
| <endif>}; separator="\n\n"> |
| <rules: {r |<if(r.ruleDescriptor.ruleScope)> |
| <ruleAttributeScopeDef(scope=r.ruleDescriptor.ruleScope)> |
| <endif>}> |
| <@members> |
| <@end> |
| |
| public: |
| <name>(InputType* instream<recognizer.grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>); |
| <name>(InputType* instream, RecognizerSharedStateType* state<recognizer.grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>); |
| |
| void init(InputType* instream <recognizer.grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}> ); |
| |
| <actions.(actionScope).context> |
| |
| <if(LEXER)> |
| <if(recognizer.filterMode)> |
| void memoize(ANTLR_MARKER ruleIndex, ANTLR_MARKER ruleParseStart); |
| bool alreadyParsedRule(ANTLR_MARKER ruleIndex); |
| <filteringNextToken()> |
| <endif> |
| <rules:{r | <if(!r.ruleDescriptor.isSynPred)><headerReturnType(ruleDescriptor=r.ruleDescriptor)> m<r.ruleDescriptor.name>( <r.ruleDescriptor.parameterScope:parameterScope()>);<endif>}; separator="\n"> |
| <rules:{r | <if(r.ruleDescriptor.isSynPred)> <headerReturnType(ruleDescriptor=r.ruleDescriptor)> msynpred( antlr3::ClassForwarder\< <r.ruleDescriptor.name> > <r.ruleDescriptor.parameterScope:parameterScope()>); |
| void m<r.ruleDescriptor.name>_fragment (<r.ruleDescriptor.parameterScope:parameterScope()>);<endif>}; separator="\n"> |
| <endif> |
| <if(!LEXER)> |
| <rules:{r | <headerReturnScope(ruleDescriptor=r.ruleDescriptor)>}> |
| <rules:{r | <if(!r.ruleDescriptor.isSynPred)> <headerReturnType(ruleDescriptor=r.ruleDescriptor)> <r.ruleDescriptor.name> (<r.ruleDescriptor.parameterScope:parameterScope()>); <endif>}; separator="\n"> |
| <rules:{r | <if(r.ruleDescriptor.isSynPred)> <headerReturnType(ruleDescriptor=r.ruleDescriptor)> msynpred( antlr3::ClassForwarder\< <r.ruleDescriptor.name> > <r.ruleDescriptor.parameterScope:parameterScope()>); |
| void m<r.ruleDescriptor.name>_fragment (<r.ruleDescriptor.parameterScope:parameterScope()>);<endif>}; separator="\n"> |
| <! generate rule/method definitions for imported rules so they |
| appear to be defined in this recognizer. !> |
| // Delegated rules |
| <recognizer.grammar.delegatedRules:{ruleDescriptor| |
| <headerReturnType(ruleDescriptor)> <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n"> |
| <endif> |
| |
| const char * getGrammarFileName(); |
| void reset(); |
| ~<name>(); |
| |
| }; |
| |
| // Function protoypes for the constructor functions that external translation units |
| // such as delegators and delegates may wish to call. |
| // |
| <if(!recognizer.grammar.grammarIsRoot)> |
| extern ANTLR_UINT8* <recognizer.grammar.composite.rootGrammar.recognizerName>TokenNames[]; |
| <endif> |
| |
| |
| /* End of token definitions for <name> |
| * ============================================================================= |
| */ |
| |
| <endNamespace(actions)> |
| |
| #endif |
| |
| /* END - Note:Keep extra line feed to satisfy UNIX systems */ |
| |
| >> |
| |
| grammarType() ::= <% |
| <if(PARSER)> |
| parser |
| <endif> |
| <if(LEXER)> |
| lexer |
| <endif> |
| <if(TREE_PARSER)> |
| tree parser |
| <endif> |
| %> |
| |
| componentType() ::= << |
| <if(PARSER)> |
| <name>ImplTraits::ParserType |
| <endif> |
| <if(LEXER)> |
| <name>ImplTraits::LexerType |
| <endif> |
| <if(TREE_PARSER)> |
| <name>ImplTraits::TreeParserType |
| <endif> |
| >> |
| |
| componentBaseType() ::= <% |
| <if(PARSER)> |
| <name>ImplTraits::BaseParserType |
| <endif> |
| <if(LEXER)> |
| <name>ImplTraits::BaseLexerType |
| <endif> |
| <if(TREE_PARSER)> |
| <name>ImplTraits::BaseTreeParserType |
| <endif> |
| %> |
| |
| streamType() ::= << |
| <if(PARSER)> |
| <name>ImplTraits::ParserType::StreamType |
| <endif> |
| <if(LEXER)> |
| <name>ImplTraits::LexerType::StreamType |
| <endif> |
| <if(TREE_PARSER)> |
| <name>ImplTraits::TreeParserType::StreamType |
| <endif> |
| >> |
| |
| |
| mainName() ::= <% |
| <if(PARSER)> |
| <name> |
| <endif> |
| <if(LEXER)> |
| <name> |
| <endif> |
| <if(TREE_PARSER)> |
| <name> |
| <endif> |
| %> |
| |
| headerReturnScope(ruleDescriptor) ::= "<returnScope(scope=ruleDescriptor.returnScope)>" |
| |
| headerReturnType(ruleDescriptor) ::= <% |
| <if(LEXER)> |
| <if(!ruleDescriptor.isSynPred)> |
| void |
| <else> |
| <returnType()> |
| <endif> |
| <else> |
| <returnType()> |
| <endif> |
| %> |
| |
| // Produce the lexer output |
| // |
| lexer( grammar, |
| name, |
| tokens, |
| scopes, |
| rules, |
| numRules, |
| filterMode, |
| superClass, |
| labelType="ImplTraits::CommonTokenType*") ::= << |
| |
| using namespace antlr3; |
| |
| <beginNamespace(actions)> |
| |
| <if(filterMode)> |
| |
| /* Override the normal MEMOIZE and HAVEALREADYPARSED macros as this is a filtering |
| * lexer. In filter mode, the memoizing and backtracking are gated at BACKTRACKING > 1 rather |
| * than just BACKTRACKING. In some cases this might generate code akin to: |
| * if (BACKTRACKING) if (BACKTRACKING > 1) memoize. |
| */ |
| void <name>::memoize(ANTLR_MARKER ruleIndex, ANTLR_MARKER ruleParseStart) |
| { |
| BaseType* base = this; |
| if ( this->get_backtracking()>1 ) |
| base->memoize( ruleIndex, ruleParseStart ); |
| |
| } |
| |
| bool <name>::alreadyParsedRule(ANTLR_MARKER ruleIndex) |
| { |
| BaseType* base = this; |
| if ( this->get_backtracking() > 1 ) |
| return base->haveParsedRule(ruleIndex); |
| return false; |
| } |
| |
| <endif> |
| |
| /* ========================================================================= |
| * Lexer matching rules end. |
| * ========================================================================= |
| */ |
| |
| <scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}> |
| |
| <actions.lexer.members> |
| |
| <name>::~<name>() |
| { |
| <if(memoize)> |
| RuleMemoType* rulememo = this->getRuleMemo(); |
| if(rulememo != NULL) |
| { |
| delete rulememo; |
| this->setRuleMemo(NULL); |
| } |
| <endif> |
| <if(grammar.directDelegates)> |
| // Free the lexers that we delegated to |
| // functions to. NULL the state so we only free it once. |
| // |
| <grammar.directDelegates: |
| {g| m_<g:delegateName()>->set_lexstate(NULL); |
| delete m_<g:delegateName()>; }; separator="\n"> |
| <endif> |
| } |
| |
| void |
| <name>::reset() |
| { |
| this->get_rec()->reset(); |
| } |
| |
| /** \brief Name of the grammar file that generated this code |
| */ |
| static const char fileName[] = "<fileName>"; |
| |
| /** \brief Return the name of the grammar file that generated this code. |
| */ |
| const char* <name>::getGrammarFileName() |
| { |
| return fileName; |
| } |
| |
| /** \brief Create a new lexer called <name> |
| * |
| * \param[in] instream Pointer to an initialized input stream |
| * \return |
| * - Success p<name> initialized for the lex start |
| * - Fail NULL |
| */ |
| <name>::<name>(StreamType* instream<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>) |
| :<name>ImplTraits::BaseLexerType(ANTLR_SIZE_HINT, instream, NULL) |
| { |
| // See if we can create a new lexer with the standard constructor |
| // |
| this->init(instream <grammar.delegators:{g|, <g:delegateName()>}>); |
| } |
| |
| /** \brief Create a new lexer called <name> |
| * |
| * \param[in] instream Pointer to an initialized input stream |
| * \param[state] state Previously created shared recognizer stat |
| * \return |
| * - Success p<name> initialized for the lex start |
| * - Fail NULL |
| */ |
| <name>::<name>(StreamType* instream, RecognizerSharedStateType* state<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>) |
| :<name>ImplTraits::BaseLexerType(ANTLR_SIZE_HINT, instream, state) |
| { |
| this->init(instream <grammar.delegators:{g|, <g:delegateName()>} >); |
| } |
| |
| void <name>::init(StreamType* instream<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>} >) |
| { |
| /* ------------------------------------------------------------------- |
| * Memory for basic structure is allocated, now to fill in |
| * in base ANTLR3 structures. We initialize the function pointers |
| * for the standard ANTLR3 lexer function set, but upon return |
| * from here, the programmer may set the pointers to provide custom |
| * implementations of each function. |
| * |
| * We don't use the macros defined in <name>.h here so you can get a sense |
| * of what goes where. |
| */ |
| |
| <if(memoize)> |
| <if(grammar.grammarIsRoot)> |
| // Create a LIST for recording rule memos. |
| // |
| this->setRuleMemo( new IntTrie(15) ); /* 16 bit depth is enough for 32768 rules! */ |
| <endif> |
| <endif> |
| |
| <if(grammar.directDelegates)> |
| // Initialize the lexers that we are going to delegate some |
| // functions to. |
| // |
| <grammar.directDelegates: |
| {g|m_<g:delegateName()> = new <g.recognizerName>(instream, this->get_lexstate(), this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n"> |
| <endif> |
| <if(grammar.delegators)> |
| // Install the pointers back to lexers that will delegate us to perform certain functions |
| // for them. |
| // |
| <grammar.delegators: |
| {g| m_<g:delegateName()> = <g:delegateName()>;}; separator="\n"> |
| <endif> |
| } |
| |
| <if(cyclicDFAs)> |
| |
| /* ========================================================================= |
| * DFA tables for the lexer |
| */ |
| <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !> |
| /* ========================================================================= |
| * End of DFA tables for the lexer |
| */ |
| <endif> |
| |
| /* ========================================================================= |
| * Functions to match the lexer grammar defined tokens from the input stream |
| */ |
| |
| <rules; separator="\n\n"> |
| |
| /* ========================================================================= |
| * Lexer matching rules end. |
| * ========================================================================= |
| */ |
| <if(synpreds)> |
| |
| /* ========================================================================= |
| * Lexer syntactic predicates |
| */ |
| <synpreds:{p | <lexerSynpred(predname=p)>}> |
| /* ========================================================================= |
| * Lexer syntactic predicates end. |
| * ========================================================================= |
| */ |
| <endif> |
| |
| /* End of Lexer code |
| * ================================================ |
| * ================================================ |
| */ |
| |
| <endNamespace(actions)> |
| |
| >> |
| |
| |
| filteringNextToken() ::= << |
| <name>ImplTraits::CommonTokenType* |
| <name>ImplTraits::TokenSourceType::nextToken() |
| { |
| LexerType* lexer; |
| typename LexerType::RecognizerSharedStateType* state; |
| |
| lexer = this->get_super(); |
| state = lexer->get_lexstate(); |
| |
| /* Get rid of any previous token (token factory takes care of |
| * any deallocation when this token is finally used up. |
| */ |
| state->set_token_present( false ); |
| state->set_error( false ); /* Start out without an exception */ |
| state->set_failedflag(false); |
| |
| /* Record the start of the token in our input stream. |
| */ |
| state->set_tokenStartCharIndex( lexer->index(); |
| state->set_tokenStartCharPositionInLine( lexer->getCharPositionInLine() ); |
| state->set_tokenStartLine( lexer->getLine() ); |
| state->set_text(""); |
| |
| /* Now call the matching rules and see if we can generate a new token |
| */ |
| for (;;) |
| { |
| if (lexer->LA(1) == ANTLR_CHARSTREAM_EOF) |
| { |
| /* Reached the end of the stream, nothing more to do. |
| */ |
| CommonTokenType& teof = m_eofToken; |
| |
| teof.set_startIndex(lexer->getCharIndex()); |
| teof.set_stopIndex(lexer->getCharIndex()); |
| teof.setLine(lexer->getLine()); |
| return &teof; |
| } |
| |
| state->set_token_present(false); |
| state->set_error(false); /* Start out without an exception */ |
| |
| { |
| ANTLR_MARKER m; |
| |
| m = this->get_istream()->mark(); |
| state->set_backtracking(1); /* No exceptions */ |
| state->set_failedflag(false); |
| |
| /* Call the generated lexer, see if it can get a new token together. |
| */ |
| lexer->mTokens(); |
| state->set_backtracking(0); |
| |
| <! mTokens backtracks with synpred at BACKTRACKING==2 |
| and we set the synpredgate to allow actions at level 1. !> |
| |
| if(state->get_failed()) |
| { |
| lexer->rewind(m); |
| lexer->consume(); <! advance one char and try again !> |
| } |
| else |
| { |
| lexer->emit(); /* Assemble the token and emit it to the stream */ |
| TokenType& tok = state->get_token(); |
| return &tok; |
| } |
| } |
| } |
| } |
| >> |
| |
| actionGate() ::= "this->get_backtracking()==0" |
| |
| filteringActionGate() ::= "this->get_backtracking()==1" |
| |
| /** How to generate a parser */ |
| genericParser( grammar, name, scopes, tokens, tokenNames, rules, numRules, |
| bitsets, inputStreamType, superClass, |
| labelType, members, rewriteElementType, |
| filterMode, ASTLabelType="ImplTraits::TreeType*") ::= << |
| |
| using namespace antlr3; |
| <if(grammar.grammarIsRoot)> |
| /** \brief Table of all token names in symbolic order, mainly used for |
| * error reporting. |
| */ |
| ANTLR_UINT8* <name>TokenNames[<length(tokenNames)>+4] |
| = { |
| (ANTLR_UINT8*) "\<invalid>", /* String to print to indicate an invalid token */ |
| (ANTLR_UINT8*) "\<EOR>", |
| (ANTLR_UINT8*) "\<DOWN>", |
| (ANTLR_UINT8*) "\<UP>", |
| <tokenNames:{it |(ANTLR_UINT8*) <it>}; separator=",\n"> |
| }; |
| <endif> |
| |
| <@members> |
| |
| <@end> |
| |
| /** \brief Name of the grammar file that generated this code |
| */ |
| static const char fileName[] = "<fileName>"; |
| |
| /** \brief Return the name of the grammar file that generated this code. |
| */ |
| const char* <name>::getGrammarFileName() |
| { |
| return fileName; |
| } |
| /** \brief Create a new <name> parser and return a context for it. |
| * |
| * \param[in] instream Pointer to an input stream interface. |
| * |
| * \return Pointer to new parser context upon success. |
| */ |
| <name>::<name>( StreamType* instream<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>) |
| <constructorInitializerType("NULL")> |
| { |
| // See if we can create a new parser with the standard constructor |
| // |
| this->init(instream<grammar.delegators:{g|, <g:delegateName()>}>); |
| } |
| |
| /** \brief Create a new <name> parser and return a context for it. |
| * |
| * \param[in] instream Pointer to an input stream interface. |
| * |
| * \return Pointer to new parser context upon success. |
| */ |
| <name>::<name>( StreamType* instream, RecognizerSharedStateType* state<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>) |
| <constructorInitializerType("state")> |
| { |
| this->init(instream <grammar.delegators:{g|, <g:delegateName()>}>); |
| } |
| |
| void <name>::init(StreamType* instream<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>) |
| { |
| <actions.parser.apifuncs> |
| <if(memoize)> |
| <if(grammar.grammarIsRoot)> |
| /* Create a LIST for recording rule memos. |
| */ |
| typedef RecognizerSharedStateType::RuleMemoType RuleMemoType; |
| this->setRuleMemo( new RuleMemoType(15) ); /* 16 bit depth is enough for 32768 rules! */<\n> |
| <endif> |
| <endif> |
| <if(grammar.directDelegates)> |
| // Initialize the lexers that we are going to delegate some |
| // functions to. |
| // |
| <grammar.directDelegates: |
| {g|m_<g:delegateName()> = new <g.recognizerName>(instream, this->get_psrstate(), this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n"> |
| <endif> |
| <if(grammar.delegators)> |
| // Install the pointers back to lexers that will delegate us to perform certain functions |
| // for them. |
| // |
| <grammar.delegators: {g| m_<g:delegateName()> = <g:delegateName()>;}; separator="\n"> |
| <endif> |
| /* Install the token table |
| */ |
| this->get_psrstate()->set_tokenNames( <grammar.composite.rootGrammar.recognizerName>TokenNames ); |
| |
| <@debugStuff()> |
| |
| } |
| |
| void |
| <name>::reset() |
| { |
| this->get_rec()->reset(); |
| } |
| |
| /** Free the parser resources |
| */ |
| <name>::~<name>() |
| { |
| <@cleanup> |
| <@end> |
| <if(grammar.directDelegates)> |
| // Free the parsers that we delegated to |
| // functions to.NULL the state so we only free it once. |
| // |
| <grammar.directDelegates: |
| {g| m_<g:delegateName()>->set_psrstate( NULL ); |
| delete m_<g:delegateName()>;}; separator="\n"> |
| <endif> |
| <if(memoize)> |
| <if(grammar.grammarIsRoot)> |
| if(this->getRuleMemo() != NULL) |
| { |
| delete this->getRuleMemo(); |
| this->setRuleMemo(NULL); |
| } |
| <endif> |
| <endif> |
| } |
| |
| /** Return token names used by this <grammarType()> |
| * |
| * The returned pointer is used as an index into the token names table (using the token |
| * number as the index). |
| * |
| * \return Pointer to first char * in the table. |
| */ |
| static ANTLR_UINT8** getTokenNames() |
| { |
| return <grammar.composite.rootGrammar.recognizerName>TokenNames; |
| } |
| |
| <members> |
| |
| /* Declare the bitsets |
| */ |
| <bitsets:{it | <bitsetDeclare(bitsetname={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>}, |
| words64=it.bits, traits={<name>ImplTraits} )>}> |
| |
| |
| <if(cyclicDFAs)> |
| |
| /* ========================================================================= |
| * DFA tables for the parser |
| */ |
| <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !> |
| /* ========================================================================= |
| * End of DFA tables for the parser |
| */ |
| <endif> |
| |
| /* ============================================== |
| * Parsing rules |
| */ |
| <rules; separator="\n\n"> |
| <if(grammar.delegatedRules)> |
| // Delegated methods that appear to be a part of this |
| // parser |
| // |
| <grammar.delegatedRules:{ruleDescriptor| |
| <returnType()> <name>::<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope()>) |
| { |
| <if(ruleDescriptor.hasReturnValue)>return <endif>m_<ruleDescriptor.grammar:delegateName()>-><ruleDescriptor.name>(<if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", "><endif>); |
| \}}; separator="\n"> |
| |
| <endif> |
| /* End of parsing rules |
| * ============================================== |
| */ |
| |
| /* ============================================== |
| * Syntactic predicates |
| */ |
| <synpreds:{p | <synpred(predname=p)>}> |
| /* End of syntactic predicates |
| * ============================================== |
| */ |
| |
| >> |
| |
| constructorInitializerType(rec_state) ::=<< |
| <if(PARSER)> |
| :ImplTraits::BaseParserType(ANTLR_SIZE_HINT, instream, <rec_state>) |
| <endif> |
| <if(TREE_PARSER)> |
| :ImplTraits::BaseTreeParserType(ANTLR_SIZE_HINT, instream, <rec_state>) |
| <endif> |
| >> |
| |
| parser( grammar, |
| name, |
| scopes, |
| tokens, |
| tokenNames, |
| rules, |
| numRules, |
| bitsets, |
| ASTLabelType, |
| superClass="Parser", |
| labelType="ImplTraits::CommonTokenType*", |
| members={<actions.parser.members>} |
| ) ::= << |
| <beginNamespace(actions)> |
| <genericParser(inputStreamType="CommonTokenStreamType*", rewriteElementType="Token", filterMode=false, ...)> |
| <endNamespace(actions)> |
| >> |
| |
| /** How to generate a tree parser; same as parser except the input |
| * stream is a different type. |
| */ |
| treeParser( grammar, |
| name, |
| scopes, |
| tokens, |
| tokenNames, |
| globalAction, |
| rules, |
| numRules, |
| bitsets, |
| filterMode, |
| labelType={<ASTLabelType>}, |
| ASTLabelType="ImplTraits::TreeType*", |
| superClass="TreeParser", |
| members={<actions.treeparser.members>} |
| ) ::= << |
| <beginNamespace(actions)> |
| <genericParser(inputStreamType="CommonTreeNodeStream*", rewriteElementType="Node", ...)> |
| <endNamespace(actions)> |
| >> |
| |
| /** A simpler version of a rule template that is specific to the imaginary |
| * rules created for syntactic predicates. As they never have return values |
| * nor parameters etc..., just give simplest possible method. Don't do |
| * any of the normal memoization stuff in here either; it's a waste. |
| * As predicates cannot be inlined into the invoking rule, they need to |
| * be in a rule by themselves. |
| */ |
| synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::= |
| << |
| // $ANTLR start <ruleName> |
| void <name>::m<ruleName>_fragment( <ruleDescriptor.parameterScope:parameterScope()> ) |
| { |
| <ruleLabelDefs()> |
| <ruleLabelInitializations()> |
| <if(trace)> |
| ANTLR_PRINTF("enter <ruleName> %d failed = %d, backtracking = %d\\n", this->LT(1),failed,this->get_backtracking() ); |
| <block> |
| ANTLR_PRINTF("exit <ruleName> %d, failed = %d, backtracking = %d\\n", this->LT(1),failed,this->get_backtracking()); |
| |
| <else> |
| <block> |
| <endif> |
| |
| goto rule<ruleDescriptor.name>Ex; /* Prevent compiler warnings */ |
| rule<ruleDescriptor.name>Ex: ; |
| } |
| // $ANTLR end <ruleName> |
| >> |
| |
| synpred(predname) ::= << |
| |
| bool <name>::msynpred( antlr3::ClassForwarder\< <predname> > ) |
| { |
| ANTLR_MARKER start; |
| bool success; |
| |
| this->inc_backtracking(); |
| <@start()> |
| start = this->mark(); |
| this->m<predname>_fragment(); // can never throw exception |
| success = !( this->get_failedflag() ); |
| this->rewind(start); |
| <@stop()> |
| this->dec_backtracking(); |
| this->set_failedflag(false); |
| return success; |
| }<\n> |
| >> |
| |
| lexerSynpred(predname) ::= << |
| <synpred(predname)> |
| >> |
| |
| ruleMemoization(rname) ::= << |
| <if(memoize)> |
| if ( (this->get_backtracking()>0) && (this->haveParsedRule(<ruleDescriptor.index>)) ) |
| { |
| <if(ruleDescriptor.hasMultipleReturnValues)> |
| <if(!ruleDescriptor.isSynPred)> |
| retval.start = 0;<\n> |
| <endif> |
| <endif> |
| <(ruleDescriptor.actions.after):execAfter()> |
| <finalCode(finalBlock=finally)> |
| <if(!ruleDescriptor.isSynPred)> |
| <scopeClean()><\n> |
| <endif> |
| return <ruleReturnValue()>; |
| } |
| <endif> |
| >> |
| |
| /** How to test for failure and return from rule */ |
| checkRuleBacktrackFailure() ::= << |
| if (this->hasException()) |
| { |
| goto rule<ruleDescriptor.name>Ex; |
| } |
| <if(backtracking)> |
| if (this->hasFailed()) |
| { |
| <scopeClean()> |
| <@debugClean()> |
| return <ruleReturnValue()>; |
| } |
| <endif> |
| >> |
| |
| /** This rule has failed, exit indicating failure during backtrack */ |
| ruleBacktrackFailure() ::= << |
| <if(backtracking)> |
| if (this->get_backtracking()>0) |
| { |
| this->set_failedflag( true ); |
| <scopeClean()> |
| return <ruleReturnValue()>; |
| } |
| <endif> |
| >> |
| |
| /** How to generate code for a rule. This includes any return type |
| * data aggregates required for multiple return values. |
| */ |
| rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= << |
| /** |
| * $ANTLR start <ruleName> |
| * <fileName>:<description> |
| */ |
| <returnType()> |
| <name>::<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>) |
| { |
| <if(trace)>ANTLR_PRINTF("enter <ruleName> %s failed=%d, backtracking=%d\n", this->LT(1), this->get_backtracking() );<endif> |
| <ruleDeclarations()> |
| <ruleDescriptor.actions.declarations> |
| <ruleLabelDefs()> |
| <ruleInitializations()> |
| <ruleDescriptor.actions.init> |
| <ruleMemoization(rname=ruleName)> |
| <ruleLabelInitializations()> |
| |
| <if(actions.(actionScope).rulecatch)> |
| try { |
| <else> |
| <if(exceptions)> |
| try { |
| <endif> |
| <endif> |
| <@preamble()> |
| { |
| <block> |
| } |
| <ruleCleanUp()> |
| |
| <if(exceptions)> |
| <(ruleDescriptor.actions.after):execAfter()> |
| <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}> |
| <else> |
| <if(!emptyRule)> |
| if (this->hasException()) |
| { |
| this->preporterror(); |
| this->precover(); |
| <@setErrorReturnValue()> |
| } |
| <if(ruleDescriptor.actions.after)> |
| else |
| { |
| <(ruleDescriptor.actions.after):execAfter()> |
| }<\n> |
| <endif> |
| <if(actions.(actionScope).rulecatch)> |
| } <actions.(actionScope).rulecatch> |
| <endif> |
| <endif> |
| <endif> |
| |
| <if(trace)>ANTLR_PRINTF("exit <ruleName> %d failed=%s backtracking=%s\n", this->LT(1), failed, this->get_backtracking() );<endif> |
| <memoize()> |
| <if(finally)> |
| <finalCode(finalBlock=finally)> |
| <endif> |
| <scopeClean()> |
| <@postamble()> |
| return <ruleReturnValue()>; |
| } |
| /* $ANTLR end <ruleName> */ |
| >> |
| |
| finalCode(finalBlock) ::= << |
| { |
| <finalBlock> |
| } |
| |
| >> |
| |
| catch(decl,action) ::= << |
| /* catch(decl,action) |
| */ |
| }catch (<e.decl>) { |
| <e.action> |
| } |
| >> |
| |
| ruleDeclarations() ::= << |
| |
| <if(ruleDescriptor.hasMultipleReturnValues)> |
| <returnType()> retval(this);<\n> |
| <else> |
| <if(PARSER)> |
| <name>ImplTraits::RuleReturnValueType _antlr_rule_exit(this); |
| <endif> |
| <if(ruleDescriptor.returnScope)> |
| <ruleDescriptor.returnScope.attributes:{ a | |
| <a.type> <a.name>; |
| }> |
| <endif> |
| <endif> |
| <if(memoize)> |
| ANTLR_MARKER <ruleDescriptor.name>_StartIndex; |
| <endif> |
| >> |
| |
| ruleInitializations() ::= << |
| /* Initialize rule variables |
| */ |
| <if(ruleDescriptor.returnScope)> |
| <if(ruleDescriptor.hasMultipleReturnValues)> |
| <ruleDescriptor.returnScope.attributes:{ a | <if(a.initValue)>retval.<a.name> = <a.initValue>;<endif> }> |
| <else> |
| <ruleDescriptor.returnScope.attributes:{ a | <if(a.initValue)><a.name> = <a.initValue>;<endif> }> |
| <endif> |
| <endif> |
| <if(memoize)> |
| <ruleDescriptor.name>_StartIndex = this->index();<\n> |
| <endif> |
| <ruleDescriptor.useScopes:{it | m_<it>_stack.push(<it>Scope()); }; separator="\n"> |
| <ruleDescriptor.ruleScope:{it | m_<it.name>_stack.push(<it.name>Scope()); }; separator="\n"> |
| >> |
| |
| ruleLabelDefs() ::= << |
| <[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels, |
| ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels] |
| :{it |<labelType> <it.label.text> = NULL;}; separator="\n" |
| > |
| <[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels] |
| :{it |ImplTraits::TokenPtrsListType list_<it.label.text>;}; separator="\n" |
| > |
| <ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n"> |
| >> |
| |
| ruleLabelInitializations() ::= << |
| <if(ruleDescriptor.hasMultipleReturnValues)> |
| <if(!ruleDescriptor.isSynPred)> |
| retval.call_start_placeholder(); |
| <endif> |
| <endif> |
| >> |
| |
| lexerRuleLabelDefs() ::= << |
| <[ruleDescriptor.tokenLabels, |
| ruleDescriptor.tokenListLabels, |
| ruleDescriptor.ruleLabels] |
| :{it |<labelType> <it.label.text> = NULL;}; separator="\n" |
| > |
| <ruleDescriptor.charLabels:{it |ANTLR_UINT32 <it.label.text>;}; separator="\n"> |
| <[ruleDescriptor.tokenListLabels, |
| ruleDescriptor.ruleListLabels, |
| ruleDescriptor.ruleListLabels] |
| :{it | ImplTraits::IntTrieType<CommonTokenType>* list_<it.label.text>;}; separator="\n" |
| > |
| >> |
| |
| lexerRuleLabelInit() ::= << |
| <[ruleDescriptor.tokenListLabels, |
| ruleDescriptor.ruleListLabels, |
| ruleDescriptor.ruleListLabels] |
| :{it |list_<it.label.text> = new ImplTraits::IntTrieType<CommonTokenType>(31);}; separator="\n" |
| > |
| >> |
| |
| lexerRuleLabelFree() ::= << |
| <[ruleDescriptor.tokenLabels, |
| ruleDescriptor.tokenListLabels, |
| ruleDescriptor.ruleLabels] |
| :{it |<it.label.text> = NULL;}; separator="\n" |
| > |
| <[ruleDescriptor.tokenListLabels, |
| ruleDescriptor.ruleListLabels, |
| ruleDescriptor.ruleListLabels] |
| :{it | delete list_<it.label.text>;}; separator="\n" |
| > |
| >> |
| |
| ruleReturnValue() ::= <% |
| <if(!ruleDescriptor.isSynPred)> |
| <if(ruleDescriptor.hasReturnValue)> |
| <if(ruleDescriptor.hasSingleReturnValue)> |
| <ruleDescriptor.singleValueReturnName> |
| <else> |
| retval |
| <endif> |
| <endif> |
| <endif> |
| %> |
| |
| memoize() ::= << |
| <if(memoize)> |
| <if(backtracking)> |
| if ( this->get_backtracking() > 0 ) { this->memoize(<ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); } |
| <endif> |
| <endif> |
| >> |
| |
| ruleCleanUp() ::= << |
| |
| // This is where rules clean up and exit |
| // |
| goto rule<ruleDescriptor.name>Ex; /* Prevent compiler warnings */ |
| rule<ruleDescriptor.name>Ex: ; |
| <if(ruleDescriptor.hasMultipleReturnValues)> |
| <if(!TREE_PARSER)> |
| <if(!ruleDescriptor.isSynPred)> |
| retval.call_stop_placeholder();<\n> |
| <endif> |
| <endif> |
| <endif> |
| >> |
| |
| scopeClean() ::= << |
| <ruleDescriptor.useScopes:{it | m_<it>_stack.pop(); }; separator="\n"> |
| <ruleDescriptor.ruleScope:{it | m_<it.name>_stack.pop(); }; separator="\n"> |
| |
| >> |
| /** How to generate a rule in the lexer; naked blocks are used for |
| * fragment rules, which do not produce tokens. |
| */ |
| lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= << |
| // Comes from: <block.description> |
| /** \brief Lexer rule generated by ANTLR3 |
| * |
| * $ANTLR start <ruleName> |
| * |
| * Looks to match the characters the constitute the token <ruleName> |
| * from the attached input stream. |
| * |
| * |
| * \remark |
| * - lexer->error == true if an exception was thrown. |
| */ |
| void <name>::m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>) |
| { |
| ANTLR_UINT32 _type; |
| <ruleDeclarations()> |
| <ruleDescriptor.actions.declarations> |
| <lexerRuleLabelDefs()> |
| <if(trace)> |
| std::cout \<\< "enter <ruleName> '" \<\< (char)this->LA(1) |
| \<\< "' line=" \<\< this->getLine() \<\< ":" \<\< this->getCharPositionInLine() |
| \<\< " failed=" \<\< this->get_failedflag() \<\< " backtracking=" \<\< this->get_backtracking() \<\< std::endl; |
| <endif> |
| |
| <if(nakedBlock)> |
| <ruleMemoization(rname=ruleName)> |
| <lexerRuleLabelInit()> |
| <ruleDescriptor.actions.init> |
| |
| <block><\n> |
| <else> |
| <ruleMemoization(rname=ruleName)> |
| <lexerRuleLabelInit()> |
| _type = <ruleName>; |
| |
| <ruleDescriptor.actions.init> |
| |
| <block> |
| this->get_lexstate()->set_type(_type); |
| <endif> |
| <if(trace)> |
| std::cout \<\< "exit <ruleName> '" \<\< (char)this->LA(1) |
| \<\< "' line=" \<\< this->getLine() \<\< ":" \<\< this->getCharPositionInLine() |
| \<\< " failed=" \<\< this->get_failedflag() \<\< " backtracking=" \<\< this->get_backtracking() \<\< std::endl; |
| <endif> |
| <ruleCleanUp()> |
| <lexerRuleLabelFree()> |
| <(ruleDescriptor.actions.after):execAfter()> |
| <memoize> |
| } |
| // $ANTLR end <ruleName> |
| >> |
| |
| /** How to generate code for the implicitly-defined lexer grammar rule |
| * that chooses between lexer rules. |
| */ |
| tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= << |
| /** This is the entry point in to the lexer from an object that |
| * wants to generate the next token, such as a pCOMMON_TOKEN_STREAM |
| */ |
| void |
| <name>::mTokens() |
| { |
| <block><\n> |
| |
| goto ruleTokensEx; /* Prevent compiler warnings */ |
| ruleTokensEx: ; |
| } |
| >> |
| |
| // S U B R U L E S |
| |
| /** A (...) subrule with multiple alternatives */ |
| block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= << |
| |
| // <fileName>:<description> |
| { |
| int alt<decisionNumber>=<maxAlt>; |
| <decls> |
| <@predecision()> |
| <decision> |
| <@postdecision()> |
| <@prebranch()> |
| switch (alt<decisionNumber>) |
| { |
| <alts:{a | <altSwitchCase(i,a)>}> |
| } |
| <@postbranch()> |
| } |
| >> |
| |
| /** A rule block with multiple alternatives */ |
| ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= << |
| { |
| // <fileName>:<description> |
| |
| ANTLR_UINT32 alt<decisionNumber>; |
| |
| alt<decisionNumber>=<maxAlt>; |
| |
| <decls> |
| <@predecision()> |
| <decision> |
| <@postdecision()> |
| switch (alt<decisionNumber>) |
| { |
| <alts:{a | <altSwitchCase(i,a)>}> |
| } |
| } |
| >> |
| |
| ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= << |
| // <fileName>:<description> |
| <decls> |
| <@prealt()> |
| <alts> |
| <@postalt()> |
| >> |
| |
| /** A special case of a (...) subrule with a single alternative */ |
| blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= << |
| // <fileName>:<description> |
| <decls> |
| <@prealt()> |
| <alts> |
| <@postalt()> |
| >> |
| |
| /** A (..)+ block with 1 or more alternatives */ |
| positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= << |
| // <fileName>:<description> |
| { |
| int cnt<decisionNumber>=0; |
| <decls> |
| <@preloop()> |
| |
| for (;;) |
| { |
| int alt<decisionNumber>=<maxAlt>; |
| <@predecision()> |
| <decision> |
| <@postdecision()> |
| switch (alt<decisionNumber>) |
| { |
| <alts:{a | <altSwitchCase(i,a)>}> |
| default: |
| |
| if ( cnt<decisionNumber> >= 1 ) |
| { |
| goto loop<decisionNumber>; |
| } |
| <ruleBacktrackFailure()> |
| <earlyExitEx()> |
| <@earlyExitException()> |
| goto rule<ruleDescriptor.name>Ex; |
| } |
| cnt<decisionNumber>++; |
| } |
| loop<decisionNumber>: ; /* Jump to here if this rule does not match */ |
| <@postloop()> |
| } |
| >> |
| |
| earlyExitEx() ::= << |
| /* mismatchedSetEx() |
| */ |
| new ANTLR_Exception\< <name>ImplTraits, EARLY_EXIT_EXCEPTION, StreamType>( this->get_rec(), "" ); |
| <\n> |
| >> |
| positiveClosureBlockSingleAlt ::= positiveClosureBlock |
| |
| /** A (..)* block with 1 or more alternatives */ |
| closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= << |
| |
| // <fileName>:<description> |
| <decls> |
| |
| <@preloop()> |
| for (;;) |
| { |
| int alt<decisionNumber>=<maxAlt>; |
| <@predecision()> |
| <decision> |
| <@postdecision()> |
| switch (alt<decisionNumber>) |
| { |
| <alts:{a | <altSwitchCase(i,a)>}> |
| default: |
| goto loop<decisionNumber>; /* break out of the loop */ |
| break; |
| } |
| } |
| loop<decisionNumber>: ; /* Jump out to here if this rule does not match */ |
| <@postloop()> |
| >> |
| |
| closureBlockSingleAlt ::= closureBlock |
| |
| /** Optional blocks (x)? are translated to (x|) by antlr before code generation |
| * so we can just use the normal block template |
| */ |
| optionalBlock ::= block |
| |
| optionalBlockSingleAlt ::= block |
| |
| /** A case in a switch that jumps to an alternative given the alternative |
| * number. A DFA predicts the alternative and then a simple switch |
| * does the jump to the code that actually matches that alternative. |
| */ |
| altSwitchCase(altNum,alt) ::= << |
| case <altNum>: |
| <@prealt()> |
| <alt> |
| break;<\n> |
| >> |
| |
| /** An alternative is just a list of elements; at outermost level */ |
| alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= << |
| // <fileName>:<description> |
| { |
| <@declarations()> |
| <@initializations()> |
| <elements:element()> |
| <rew> |
| <@cleanup()> |
| } |
| >> |
| |
| // E L E M E N T S |
| /** What to emit when there is no rewrite. For auto build |
| * mode, does nothing. |
| */ |
| noRewrite(rewriteBlockLevel, treeLevel) ::= "" |
| |
| /** Dump the elements one per line */ |
| element(e) ::= << |
| <@prematch()> |
| <e.el><\n> |
| >> |
| |
| /** match a token optionally with a label in front */ |
| tokenRef(token,label,elementIndex,terminalOptions) ::= << |
| <if(label)><label> = <endif> this->matchToken(<token>, &FOLLOW_<token>_in_<ruleName><elementIndex>); |
| <checkRuleBacktrackFailure()> |
| >> |
| |
| /** ids+=ID */ |
| tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= << |
| <tokenRef(...)> |
| <listLabel(elem=label,...)> |
| >> |
| |
| listLabel(label,elem) ::= << |
| list_<label>.push_back(<elem>); |
| >> |
| |
| |
| /** match a character */ |
| charRef(char,label) ::= << |
| <if(label)> |
| <label> = this->LA(1);<\n> |
| <endif> |
| this->matchc(<char>); |
| <checkRuleBacktrackFailure()> |
| >> |
| |
| /** match a character range */ |
| charRangeRef(a,b,label) ::= << |
| <if(label)> |
| <label> = this->LA(1);<\n> |
| <endif> |
| this->matchRange(<a>, <b>); |
| <checkRuleBacktrackFailure()> |
| >> |
| |
| /** For now, sets are interval tests and must be tested inline */ |
| matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= << |
| <if(label)> |
| <if(LEXER)> |
| <label>= this->LA(1);<\n> |
| <else> |
| <label>=(<labelType>) this->LT(1);<\n> |
| <endif> |
| <endif> |
| if ( <s> ) |
| { |
| this->consume(); |
| <postmatchCode> |
| <if(!LEXER)> |
| this->set_perror_recovery(false); |
| <endif> |
| <if(backtracking)> this->set_failedflag(false); <\n><endif> |
| } |
| else |
| { |
| <ruleBacktrackFailure()> |
| <mismatchedSetEx()> |
| <@mismatchedSetException()> |
| <if(LEXER)> |
| this->recover(); |
| <else> |
| <! use following code to make it recover inline; |
| this->recoverFromMismatchedSet(&FOLLOW_set_in_<ruleName><elementIndex>); |
| !> |
| <endif> |
| goto rule<ruleDescriptor.name>Ex; |
| }<\n> |
| >> |
| |
| mismatchedSetEx() ::= << |
| new ANTLR_Exception\< <name>ImplTraits, MISMATCHED_SET_EXCEPTION, StreamType>( this->get_rec(), "" ); |
| <if(PARSER)> |
| this->get_exception()->set_expectingSet(NULL); |
| <! use following code to make it recover inline; |
| this->get_exception()->set_expectingSet( &FOLLOW_set_in_<ruleName><elementIndex> ); |
| !> |
| <endif> |
| >> |
| |
| matchRuleBlockSet ::= matchSet |
| |
| matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= << |
| <matchSet(...)> |
| <listLabel(elem=label,...)> |
| >> |
| |
| /** Match a string literal */ |
| lexerStringRef(string,label,elementIndex) ::= << |
| <if(label)> |
| ANTLR_MARKER <label>Start = this->getCharIndex(); |
| ANTLR_UINT32 <label>StartLine<elementIndex> = this->getLine(); |
| ANTLR_UINT32 <label>StartCharPos<elementIndex> = this->getCharPositionInLine(); |
| this->matchs(<string>); |
| <checkRuleBacktrackFailure()> |
| <label> = new CommonTokenType; |
| <label>->set_type( CommonTokenType::TOKEN_INVALID ); |
| <label>->set_startIndex( <label>Start); |
| <label>->set_stopIndex( this->getCharIndex()-1); |
| <label>->set_input( this->get_input() ); |
| <label>->set_line( <label>StartLine<elementIndex> ); |
| <label>->set_charPositionInLine( <label>StartCharPos<elementIndex> ); |
| <else> |
| this->matchs(<string>); |
| <checkRuleBacktrackFailure()><\n> |
| <endif> |
| >> |
| |
| wildcard(token,label,elementIndex,terminalOptions) ::= << |
| <if(label)> |
| <label>=(<labelType>)this->LT(1);<\n> |
| <endif> |
| this->matchAnyToken(); |
| <checkRuleBacktrackFailure()> |
| >> |
| |
| wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= << |
| <wildcard(...)> |
| <listLabel(elem=label,...)> |
| >> |
| |
| /** Match . wildcard in lexer */ |
| wildcardChar(label, elementIndex) ::= << |
| <if(label)> |
| <label> = this->LA(1);<\n> |
| <endif> |
| this->matchAny(); |
| <checkRuleBacktrackFailure()> |
| >> |
| |
| wildcardCharListLabel(label, elementIndex) ::= << |
| <wildcardChar(...)> |
| <listLabel(elem=label,...)> |
| >> |
| |
| /** Match a rule reference by invoking it possibly with arguments |
| * and a return value or values. The 'rule' argument was the |
| * target rule name, but now is type Rule, whose toString is |
| * same: the rule name. Now though you can access full rule |
| * descriptor stuff. |
| */ |
| ruleRef(rule,label,elementIndex,args,scope) ::= << |
| this->followPush(FOLLOW_<rule.name>_in_<ruleName><elementIndex>); |
| <if(label)><label>=<endif><if(scope)>m_<scope:delegateName()>-><endif><rule.name>(<if(args)><args; separator=", "><endif>);<\n> |
| this->followPop(); |
| <checkRuleBacktrackFailure()> |
| >> |
| |
| /** ids+=r */ |
| ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= << |
| <ruleRef(...)> |
| <listLabel(elem=label,...)> |
| >> |
| |
| /** A lexer rule reference |
| * The 'rule' argument was the target rule name, but now |
| * is type Rule, whose toString is same: the rule name. |
| * Now though you can access full rule descriptor stuff. |
| */ |
| lexerRuleRef(rule,label,args,elementIndex,scope) ::= << |
| /* <description> */ |
| <if(label)> |
| { |
| ANTLR_MARKER <label>Start<elementIndex> = this->getCharIndex(); |
| ANTLR_UINT32 <label>StartLine<elementIndex> = this->getLine(); |
| ANTLR_UINT32 <label>StartCharPos<elementIndex> = this->getCharPositionInLine(); |
| <if(scope)>m_<scope:delegateName()>-><endif>m<rule.name>(<if(scope)>m_<scope:delegateName()><endif> <if(args)>, <endif><args; separator=", ">); |
| <checkRuleBacktrackFailure()> |
| <label> = new CommonTokenType(); |
| <label>->set_type( CommonTokenType::TOKEN_INVALID); |
| <label>->set_startIndex( <label>Start<elementIndex> ); |
| <label>->set_stopIndex( this->getCharIndex()-1 ); |
| <label>->set_input( this->get_input() ); |
| <label>->set_line( <label>StartLine<elementIndex> ); |
| <label>->set_charPositionInLine( <label>StartCharPos<elementIndex> ); |
| } |
| <else> |
| <if(scope)>m_<scope:delegateName()>-><endif>m<rule.name>(<args; separator=", ">); |
| <checkRuleBacktrackFailure()> |
| <endif> |
| >> |
| |
| /** i+=INT in lexer */ |
| lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= << |
| <lexerRuleRef(...)> |
| <listLabel(elem=label,...)> |
| >> |
| |
| /** EOF in the lexer */ |
| lexerMatchEOF(label,elementIndex) ::= << |
| <if(label)> |
| { |
| ANTLR_UINT32 <label>Start<elementIndex>; |
| ANTLR_UINT32 <label>StartLine<elementIndex> = this->getLine(); |
| ANTLR_UINT32 <label>StartCharPos<elementIndex> = this->getCharPositionInLine(); |
| <labelType> <label>; |
| <label>Start<elementIndex> = this->getCharIndex(); |
| this->matchc(ANTLR_CHARSTREAM_EOF); |
| <checkRuleBacktrackFailure()> |
| <label> = new CommonTokenType(); |
| <label>->set_type( CommonTokenType::TOKEN_EOF ); |
| <label>->set_startIndex(<label>Start<elementIndex>); |
| <label>->set_stopIndex(this->getCharIndex()-1); |
| <label>->set_input( this->get_input() ); |
| <label>->set_line( <label>StartLine<elementIndex> ); |
| <label>->set_charPositionInLine( <label>StartCharPos<elementIndex> ); |
| } |
| <else> |
| this->matchc(ANTLR_CHARSTREAM_EOF); |
| <checkRuleBacktrackFailure()> |
| <endif> |
| >> |
| |
| // used for left-recursive rules |
| recRuleDefArg() ::= "int <recRuleArg()>" |
| recRuleArg() ::= "_p" |
| recRuleAltPredicate(ruleName,opPrec) ::= "<recRuleArg()> \<= <opPrec>" |
| recRuleSetResultAction() ::= "root_0=$<ruleName>_primary.tree;" |
| recRuleSetReturnAction(src,name) ::= "$<name>=$<src>.<name>;" |
| |
| /** match ^(root children) in tree parser */ |
| tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel, treeLevel) ::= << |
| <root:element()> |
| <actionsAfterRoot:element()> |
| <if(nullableChildList)> |
| if ( this->LA(1)== CommonTokenType::TOKEN_DOWN ) { |
| this->matchToken(CommonTokenType::TOKEN_DOWN, NULL); |
| <checkRuleBacktrackFailure()> |
| <children:element()> |
| this->matchToken(CommonTokenType::TOKEN_UP, NULL); |
| <checkRuleBacktrackFailure()> |
| } |
| <else> |
| this->matchToken(CommonTokenType::TOKEN_DOWN, NULL); |
| <checkRuleBacktrackFailure()> |
| <children:element()> |
| this->matchToken(CommonTokenType::TOKEN_UP, NULL); |
| <checkRuleBacktrackFailure()> |
| <endif> |
| >> |
| |
| /** Every predicate is used as a validating predicate (even when it is |
| * also hoisted into a prediction expression). |
| */ |
| validateSemanticPredicate(pred,description) ::= << |
| if ( !(<evalPredicate(...)>) ) |
| { |
| <ruleBacktrackFailure()> |
| <newFPE(...)> |
| } |
| >> |
| |
| newFPE() ::= << |
| ExceptionBaseType* ex = new ANTLR_Exception\< <name>ImplTraits, FAILED_PREDICATE_EXCEPTION, StreamType>( this->get_rec(), "<description>" ); |
| ex->set_ruleName( "<ruleName>" ); |
| <\n> |
| >> |
| |
| // F i x e d D F A (if-then-else) |
| |
| dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= << |
| |
| { |
| int LA<decisionNumber>_<stateNumber> = this->LA(<k>); |
| <edges; separator="\nelse "> |
| else |
| { |
| <if(eotPredictsAlt)> |
| alt<decisionNumber>=<eotPredictsAlt>; |
| <else> |
| <ruleBacktrackFailure()> |
| |
| <newNVException()> |
| goto rule<ruleDescriptor.name>Ex; |
| |
| <endif> |
| } |
| } |
| >> |
| |
| newNVException() ::= << |
| ExceptionBaseType* ex = new ANTLR_Exception\< <name>ImplTraits, NO_VIABLE_ALT_EXCEPTION, StreamType>( this->get_rec(), "<description>" ); |
| ex->set_decisionNum( <decisionNumber> ); |
| ex->set_state( <stateNumber> ); |
| <@noViableAltException()> |
| <\n> |
| >> |
| |
| /** Same as a normal DFA state except that we don't examine lookahead |
| * for the bypass alternative. It delays error detection but this |
| * is faster, smaller, and more what people expect. For (X)? people |
| * expect "if ( LA(1)==X ) match(X);" and that's it. |
| */ |
| dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= << |
| { |
| int LA<decisionNumber>_<stateNumber> = this->LA(<k>); |
| <edges; separator="\nelse "> |
| } |
| >> |
| |
| /** A DFA state that is actually the loopback decision of a closure |
| * loop. If end-of-token (EOT) predicts any of the targets then it |
| * should act like a default clause (i.e., no error can be generated). |
| * This is used only in the lexer so that for ('a')* on the end of a rule |
| * anything other than 'a' predicts exiting. |
| */ |
| |
| dfaLoopbackStateDecls()::= << |
| ANTLR_UINT32 LA<decisionNumber>_<stateNumber>; |
| >> |
| dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= << |
| { |
| /* dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) |
| */ |
| int LA<decisionNumber>_<stateNumber> = this->LA(<k>); |
| <edges; separator="\nelse "><\n> |
| <if(eotPredictsAlt)> |
| <if(!edges)> |
| alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !> |
| <else> |
| else |
| { |
| alt<decisionNumber>=<eotPredictsAlt>; |
| }<\n> |
| <endif> |
| <endif> |
| } |
| >> |
| |
| /** An accept state indicates a unique alternative has been predicted */ |
| dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;" |
| |
| /** A simple edge with an expression. If the expression is satisfied, |
| * enter to the target state. To handle gated productions, we may |
| * have to evaluate some predicates for this edge. |
| */ |
| dfaEdge(labelExpr, targetState, predicates) ::= << |
| if ( (<labelExpr>)<if(predicates)> && (<predicates>)<endif>) |
| { |
| <targetState> |
| } |
| >> |
| |
| // F i x e d D F A (switch case) |
| |
| /** A DFA state where a SWITCH may be generated. The code generator |
| * decides if this is possible: CodeGenerator.canGenerateSwitch(). |
| */ |
| dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= << |
| switch ( this->LA(<k>) ) |
| { |
| <edges; separator="\n"> |
| |
| default: |
| <if(eotPredictsAlt)> |
| alt<decisionNumber>=<eotPredictsAlt>; |
| <else> |
| <ruleBacktrackFailure()> |
| <newNVException()> |
| goto rule<ruleDescriptor.name>Ex;<\n> |
| <endif> |
| }<\n> |
| >> |
| |
| dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= << |
| switch ( this->LA(<k>) ) |
| { |
| <edges; separator="\n"> |
| }<\n> |
| >> |
| |
| dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= << |
| switch ( this->LA(<k>) ) |
| { |
| <edges; separator="\n"><\n> |
| <if(eotPredictsAlt)> |
| default: |
| alt<decisionNumber>=<eotPredictsAlt>; |
| break;<\n> |
| <endif> |
| }<\n> |
| >> |
| |
| dfaEdgeSwitch(labels, targetState) ::= << |
| <labels:{it |case <it>:}; separator="\n"> |
| { |
| <targetState> |
| } |
| break; |
| >> |
| |
| // C y c l i c D F A |
| |
| /** The code to initiate execution of a cyclic DFA; this is used |
| * in the rule to predict an alt just like the fixed DFA case. |
| * The <name> attribute is inherited via the parser, lexer, ... |
| */ |
| dfaDecision(decisionNumber,description) ::= << |
| alt<decisionNumber> = cdfa<decisionNumber>.predict(this, this->get_rec(), this->get_istream(), cdfa<decisionNumber> ); |
| <checkRuleBacktrackFailure()> |
| >> |
| |
| /* Dump DFA tables as static initialized arrays of shorts(16 bits)/characters(8 bits) |
| * which are then used to statically initialize the dfa structure, which means that there |
| * is no runtime initialization whatsoever, other than anything the C compiler might |
| * need to generate. In general the C compiler will lay out memory such that there is no |
| * runtime code required. |
| */ |
| cyclicDFA(dfa) ::= << |
| /** Static dfa state tables for Cyclic dfa: |
| * <dfa.description> |
| */ |
| static const ANTLR_INT32 dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] = |
| { |
| <dfa.eot; wrap="\n", separator=", ", null="-1"> |
| }; |
| static const ANTLR_INT32 dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] = |
| { |
| <dfa.eof; wrap="\n", separator=", ", null="-1"> |
| }; |
| static const ANTLR_INT32 dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] = |
| { |
| <dfa.min; wrap="\n", separator=", ", null="-1"> |
| }; |
| static const ANTLR_INT32 dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] = |
| { |
| <dfa.max; wrap="\n", separator=", ", null="-1"> |
| }; |
| static const ANTLR_INT32 dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] = |
| { |
| <dfa.accept; wrap="\n", separator=", ", null="-1"> |
| }; |
| static const ANTLR_INT32 dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] = |
| { |
| <dfa.special; wrap="\n", separator=", ", null="-1"> |
| }; |
| |
| /** Used when there is no transition table entry for a particular state */ |
| static const ANTLR_INT32* dfa<dfa.decisionNumber>_T_empty = NULL; |
| |
| <dfa.edgeTransitionClassMap.keys:{ table | |
| static const ANTLR_INT32 dfa<dfa.decisionNumber>_T<i0>[] = |
| { |
| <table; separator=", ", wrap="\n", null="-1"> |
| \};<\n>}; null = ""> |
| |
| /* Transition tables are a table of sub tables, with some tables |
| * reused for efficiency. |
| */ |
| static const ANTLR_INT32 * const dfa<dfa.decisionNumber>_transitions[] = |
| { |
| <dfa.transitionEdgeTables:{xref|dfa<dfa.decisionNumber>_T<xref>}; separator=", ", wrap="\n", null="NULL"> |
| }; |
| |
| <@errorMethod()> |
| |
| /* Declare tracking structure for Cyclic DFA <dfa.decisionNumber> |
| */ |
| class <name>CyclicDFA<dfa.decisionNumber> : public CyclicDFA\< <name>ImplTraits, <name> >, public <name>Tokens |
| { |
| public: |
| typedef CyclicDFA\< <name>ImplTraits, <name> > BaseType; |
| typedef BaseType::ContextType CtxType; |
| |
| private: |
| <if(dfa.specialStateSTs)> |
| //to maintain C-Target compatibility, we need to make some of ctx functions look like member funcs |
| CtxType* m_ctx; |
| <endif> |
| |
| public: |
| <name>CyclicDFA<dfa.decisionNumber>( ANTLR_INT32 decisionNumber |
| , const ANTLR_UCHAR* description |
| , const ANTLR_INT32* const eot |
| , const ANTLR_INT32* const eof |
| , const ANTLR_INT32* const min |
| , const ANTLR_INT32* const max |
| , const ANTLR_INT32* const accept |
| , const ANTLR_INT32* const special |
| , const ANTLR_INT32* const *const transition) |
| :BaseType( decisionNumber, description, eot, eof, min, max, accept, |
| special, transition ) |
| { |
| <if(dfa.specialStateSTs)> |
| m_ctx = NULL; |
| <endif> |
| } |
| |
| <if(dfa.specialStateSTs)> |
| ANTLR_UINT32 LA(ANTLR_INT32 i) |
| { |
| return m_ctx->LA(i); |
| } |
| |
| <if(PARSER)> |
| const CtxType::CommonTokenType* LT(ANTLR_INT32 k) |
| { |
| return m_ctx->LT(k); |
| } |
| <endif> |
| <if(synpreds)> |
| template\<typename PredType> |
| bool msynpred( PredType pred ) |
| { |
| return m_ctx->msynpred(pred); |
| } |
| <endif> |
| |
| ANTLR_INT32 specialStateTransition(CtxType * ctx, RecognizerType* recognizer, IntStreamType* is, ANTLR_INT32 s) |
| { |
| ANTLR_INT32 _s; |
| |
| m_ctx = ctx; |
| _s = s; |
| switch (s) |
| { |
| <dfa.specialStateSTs:{state | |
| case <i0>: |
| |
| <state>}; separator="\n"> |
| } |
| <if(backtracking)> |
| if ( ctx->get_backtracking() > 0) |
| { |
| ctx->set_failedflag( true ); |
| return -1; |
| } |
| <endif> |
| ExceptionBaseType* ex = new ANTLR_Exception\< <name>ImplTraits, NO_VIABLE_ALT_EXCEPTION, StreamType>( recognizer, "<dfa.description>" ); |
| ex->set_decisionNum( <dfa.decisionNumber> ); |
| ex->set_state(_s); |
| <@noViableAltException()> |
| return -1; |
| } |
| <endif> |
| }; |
| |
| static <name>CyclicDFA<dfa.decisionNumber> cdfa<dfa.decisionNumber>( |
| <dfa.decisionNumber>, /* Decision number of this dfa */ |
| /* Which decision this represents: */ |
| (const ANTLR_UCHAR*)"<dfa.description>", |
| dfa<dfa.decisionNumber>_eot, /* EOT table */ |
| dfa<dfa.decisionNumber>_eof, /* EOF table */ |
| dfa<dfa.decisionNumber>_min, /* Minimum tokens for each state */ |
| dfa<dfa.decisionNumber>_max, /* Maximum tokens for each state */ |
| dfa<dfa.decisionNumber>_accept, /* Accept table */ |
| dfa<dfa.decisionNumber>_special, /* Special transition states */ |
| dfa<dfa.decisionNumber>_transitions /* Table of transition tables */ |
| |
| ); |
| |
| |
| /* End of Cyclic DFA <dfa.decisionNumber> |
| * --------------------- |
| */ |
| >> |
| |
| /** A state in a cyclic DFA; it's a special state and part of a big switch on |
| * state. |
| */ |
| cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= << |
| { |
| ANTLR_UINT32 LA<decisionNumber>_<stateNumber>;<\n> |
| ANTLR_MARKER index<decisionNumber>_<stateNumber>;<\n> |
| |
| LA<decisionNumber>_<stateNumber> = ctx->LA(1);<\n> |
| <if(semPredState)> <! get next lookahead symbol to test edges, then rewind !> |
| index<decisionNumber>_<stateNumber> = ctx->index();<\n> |
| ctx->rewindLast();<\n> |
| <endif> |
| s = -1; |
| <edges; separator="\nelse "> |
| <if(semPredState)> <! return input cursor to state before we rewound !> |
| ctx->seek(index<decisionNumber>_<stateNumber>);<\n> |
| <endif> |
| if ( s>=0 ) |
| { |
| return s; |
| } |
| } |
| break; |
| >> |
| |
| /** Just like a fixed DFA edge, test the lookahead and indicate what |
| * state to jump to next if successful. |
| */ |
| cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= << |
| if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif> ) |
| { |
| s = <targetStateNumber>; |
| }<\n> |
| >> |
| |
| /** An edge pointing at end-of-token; essentially matches any char; |
| * always jump to the target. |
| */ |
| eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= << |
| s = <targetStateNumber>;<\n> |
| >> |
| |
| |
| // D F A E X P R E S S I O N S |
| |
| andPredicates(left,right) ::= "( (<left>) && (<right>) )" |
| |
| orPredicates(operands) ::= "(<operands:{o|(<o>)}; separator=\"||\">)" |
| |
| notPredicate(pred) ::= "!( <evalPredicate(pred,{})> )" |
| |
| evalPredicate(pred,description) ::= "(<pred>)" |
| |
| evalSynPredicate(pred,description) ::= "this->msynpred( antlr3::ClassForwarder\<<pred>>() )" |
| |
| lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>" |
| |
| /** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable |
| * somewhere. Must ask for the lookahead directly. |
| */ |
| isolatedLookaheadTest(atom,k,atomAsInt) ::= "this->LA(<k>) == <atom>" |
| |
| lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <% |
| ((LA<decisionNumber>_<stateNumber> >= <lower>) && (LA<decisionNumber>_<stateNumber> \<= <upper>)) |
| %> |
| |
| isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "((this->LA(<k>) >= <lower>) && (this->LA(<k>) \<= <upper>))" |
| |
| setTest(ranges) ::= "<ranges; separator=\" || \">" |
| |
| // A T T R I B U T E S |
| |
| makeScopeSet() ::= << |
| /* makeScopeSet() |
| */ |
| /** Definition of the <scope.name> scope variable tracking |
| * structure. An instance of this structure is created by calling |
| * <name>_<scope.name>Push(). |
| */ |
| struct <scopeStruct(sname=scope.name,...)> |
| { |
| /* ============================================================================= |
| * Programmer defined variables... |
| */ |
| <scope.attributes:{it |<it.decl>;}; separator="\n"> |
| |
| /* End of programmer defined variables |
| * ============================================================================= |
| */ |
| }; |
| |
| >> |
| |
| globalAttributeScopeDecl(scope) ::= << |
| <if(scope.attributes)> |
| /* globalAttributeScopeDecl(scope) |
| */ |
| <makeScopeSet(...)> |
| <endif> |
| >> |
| |
| ruleAttributeScopeDecl(scope) ::= << |
| <if(scope.attributes)> |
| /* ruleAttributeScopeDecl(scope) |
| */ |
| <makeScopeSet(...)> |
| <endif> |
| >> |
| |
| globalAttributeScopeDef(scope) ::= |
| << |
| /* globalAttributeScopeDef(scope) |
| */ |
| <if(scope.attributes)> |
| |
| StackType\< <scopeStruct(sname=scope.name)> > <scopeStack(sname=scope.name)>; |
| |
| <endif> |
| >> |
| |
| ruleAttributeScopeDef(scope) ::= << |
| <if(scope.attributes)> |
| /* ruleAttributeScopeDef(scope) |
| */ |
| StackType\< <scopeStruct(sname=scope.name)> > <scopeStack(sname=scope.name,...)>; |
| |
| <endif> |
| >> |
| |
| scopeStruct(sname) ::= << |
| <sname>Scope |
| >> |
| |
| scopeStack(sname) ::= << |
| m_<sname>_stack |
| >> |
| |
| returnStructName(r) ::= "<r.name>_return" |
| |
| returnType() ::= <% |
| <if(!ruleDescriptor.isSynPred)> |
| <if(ruleDescriptor.hasMultipleReturnValues)> |
| <ruleDescriptor.grammar.recognizerName>::<ruleDescriptor:returnStructName()> |
| <else> |
| <if(ruleDescriptor.hasSingleReturnValue)> |
| <ruleDescriptor.singleValueReturnType> |
| <else> |
| void |
| <endif> |
| <endif> |
| <else> |
| bool |
| <endif> |
| %> |
| |
| /** Generate the C type associated with a single or multiple return |
| * value(s). |
| */ |
| ruleLabelType(referencedRule) ::= <% |
| <if(referencedRule.hasMultipleReturnValues)> |
| <referencedRule.name>_return |
| <else> |
| <if(referencedRule.hasSingleReturnValue)> |
| <referencedRule.singleValueReturnType> |
| <else> |
| void |
| <endif> |
| <endif> |
| %> |
| |
| delegateName(d) ::= << |
| <if(d.label)><d.label><else>g<d.name><endif> |
| >> |
| |
| /** Using a type to init value map, try to init a type; if not in table |
| * must be an object, default value is "0". |
| */ |
| initValue(typeName) ::= << |
| = <cTypeInitMap.(typeName)> |
| >> |
| |
| /** Define a rule label */ |
| ruleLabelDef(label) ::= << |
| <ruleLabelType(referencedRule=label.referencedRule)> <label.label.text>; |
| >> |
| /** Rule label default value */ |
| ruleLabelInitVal(label) ::= << |
| >> |
| |
| ASTLabelType() ::= "<if(recognizer.ASTLabelType)><recognizer.ASTLabelType><else>ImplTraits::TreeType*<endif>" |
| |
| /** Define a return struct for a rule if the code needs to access its |
| * start/stop tokens, tree stuff, attributes, ... Leave a hole for |
| * subgroups to stick in members. |
| */ |
| returnScope(scope) ::= << |
| <if(!ruleDescriptor.isSynPred)> |
| <if(ruleDescriptor.hasMultipleReturnValues)> |
| <if(!TREE_PARSER)> |
| struct <ruleDescriptor:returnStructName()> : public <name>ImplTraits::RuleReturnValueType |
| { |
| public: |
| typedef <name>ImplTraits::RuleReturnValueType BaseType; |
| <ruleDescriptor:returnStructName()>() |
| : BaseType() |
| <if(scope)>, <scope.attributes:{it | <it.name>() }; separator=","><endif> |
| { init(); } |
| <ruleDescriptor:returnStructName()>( BaseParserType* parser ) |
| : BaseType(parser) |
| <if(scope)>, <scope.attributes:{it | <it.name>() }; separator=","><endif> |
| { init(); } |
| <ruleDescriptor:returnStructName()>( const <ruleDescriptor:returnStructName()>& other ) |
| : BaseType(other) |
| <if(scope)>, <scope.attributes:{it | <it.name>(other.<it.name>) }; separator=", "><endif> |
| { copy(other); } |
| ~<ruleDescriptor:returnStructName()>() |
| { |
| <@ruleReturnMembersDelete()> |
| } |
| |
| <ruleDescriptor:returnStructName()>& |
| operator=( const <ruleDescriptor:returnStructName()>& other ) |
| { |
| BaseType::operator=( other ); |
| <if(scope)><scope.attributes:{it | <it.name> = other.<it.name>; }; separator="\n"><endif> |
| copy(other); |
| return *this; |
| } |
| <@ruleReturnMembers()> |
| void init() { <@ruleReturnMembersInit()> } |
| void copy( const <ruleDescriptor:returnStructName()>& other) { <@ruleReturnMembersCopy()> } |
| <else> |
| struct <ruleDescriptor:returnStructName()> |
| { |
| public: |
| <name>ImplTraits::<recognizer.ASTLabelType> start; |
| <name>ImplTraits::<recognizer.ASTLabelType> stop; |
| <ruleDescriptor:returnStructName()>( const <ruleDescriptor:returnStructName()>& other ) |
| <if(scope.attributes)> |
| <scope.attributes:{it | <it.name>(other.<it.name>) }; separator=","> |
| <endif> |
| { |
| start = other.start; |
| stop = other.stop; |
| } |
| |
| <ruleDescriptor:returnStructName()>& |
| operator=( const <ruleDescriptor:returnStructName()>& other ) |
| { |
| start = other.start; |
| stop = other.stop; |
| |
| <scope.attributes:{it | <it.name> = other.<it.name>; }; separator="\n"> |
| return *this; |
| } |
| <endif> |
| <if(scope)><scope.attributes:{it |<it.type> <it.name>;}; separator="\n"><endif> |
| }; |
| |
| <endif> |
| <endif> |
| >> |
| |
| parameterScope(scope) ::= << |
| <scope.attributes:{it |<it.decl>}; separator=", "> |
| >> |
| |
| parameterAttributeRef(attr) ::= "<attr.name>" |
| parameterSetAttributeRef(attr,expr) ::= "<attr.name>=<expr>;" |
| |
| /** Note that the scopeAttributeRef does not have access to the |
| * grammar name directly |
| */ |
| scopeAttributeRef(scope,attr,index,negIndex) ::= <% |
| <if(negIndex)> |
| m_<scope>_stack.at( m_<scope>_stack.size()-<negIndex>-1).<attr.name> |
| <else> |
| <if(index)> |
| m_<scope>_stack.at(<index>).<attr.name> |
| <else> |
| m_<scope>_stack.peek().<attr.name> |
| <endif> |
| <endif> |
| %> |
| |
| scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <% |
| <if(negIndex)> |
| m_<scope>_stack.at( m_<scope>_stack.size()-<negIndex>-1).<attr.name> = <expr>; |
| <else> |
| <if(index)> |
| m_<scope>_stack.at(<index>).<attr.name> = <expr>; |
| <else> |
| m_<scope>_stack.peek().<attr.name> =<expr>; |
| <endif> |
| <endif> |
| %> |
| |
| /** $x is either global scope or x is rule with dynamic scope; refers |
| * to stack itself not top of stack. This is useful for predicates |
| * like {$function.size()>0 && $function::name.equals("foo")}? |
| */ |
| isolatedDynamicScopeRef(scope) ::= "<scope>_stack" |
| |
| /** reference an attribute of rule; might only have single return value */ |
| ruleLabelRef(referencedRule,scope,attr) ::= << |
| <if(referencedRule.hasMultipleReturnValues)> |
| <scope>.<attr.name> |
| <else> |
| <scope> |
| <endif> |
| >> |
| |
| returnAttributeRef(ruleDescriptor,attr) ::= << |
| <if(ruleDescriptor.hasMultipleReturnValues)> |
| retval.<attr.name> |
| <else> |
| <attr.name> |
| <endif> |
| >> |
| |
| returnSetAttributeRef(ruleDescriptor,attr,expr) ::= << |
| <if(ruleDescriptor.hasMultipleReturnValues)> |
| retval.<attr.name>=<expr>; |
| <else> |
| <attr.name>=<expr>; |
| <endif> |
| >> |
| |
| /** How to translate $tokenLabel */ |
| tokenLabelRef(label) ::= "<label>" |
| |
| /** ids+=ID {$ids} or e+=expr {$e} */ |
| listLabelRef(label) ::= "list_<label>" |
| |
| |
| // not sure the next are the right approach |
| // |
| tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText())" |
| tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>->get_type())" |
| tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>->get_line())" |
| tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>->get_charPositionInLine())" |
| tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>->get_channel())" |
| tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>->get_tokenIndex())" |
| tokenLabelPropertyRef_tree(scope,attr) ::= "(<scope>->get_tree())" |
| tokenLabelPropertyRef_int(scope,attr) ::= "(<name>ImplTraits::ConvertToInt32(<scope>->getText()))" |
| |
| ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>.start)" |
| ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>.stop)" |
| ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>.tree)" |
| ruleLabelPropertyRef_text(scope,attr) ::= << |
| <if(TREE_PARSER)> |
| (this->get_strstream()->toStringSS(<scope>.start, <scope>.start)) |
| <else> |
| (this->get_strstream()->toStringTT(<scope>.start, <scope>.stop)) |
| <endif> |
| >> |
| |
| ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st" |
| |
| /** Isolated $RULE ref ok in lexer as it's a Token */ |
| lexerRuleLabel(label) ::= "<label>" |
| |
| lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>->get_type())" |
| lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>->get_line())" |
| lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>->get_charPositionInLine())" |
| lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>->get_channel())" |
| lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>->get_tokenIndex())" |
| lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText())" |
| |
| // Somebody may ref $template or $tree or $stop within a rule: |
| rulePropertyRef_start(scope,attr) ::= "retval.start" |
| rulePropertyRef_stop(scope,attr) ::= "retval.stop" |
| rulePropertyRef_tree(scope,attr) ::= "retval.tree" |
| rulePropertyRef_text(scope,attr) ::= << |
| <if(TREE_PARSER)> |
| this->get_input()->toStringSS( this->get_adaptor()->getTokenStartIndex(retval.start), this->get_adaptor()->getTokenStopIndex(retval.start)) |
| <else> |
| this->get_strstream()->toStringTT(retval.start, this->LT(-1)) |
| <endif> |
| >> |
| rulePropertyRef_st(scope,attr) ::= "retval.st" |
| |
| lexerRulePropertyRef_text(scope,attr) ::= "this->getText()" |
| lexerRulePropertyRef_type(scope,attr) ::= "_type" |
| lexerRulePropertyRef_line(scope,attr) ::= "this->get_state()->get_tokenStartLine()" |
| lexerRulePropertyRef_pos(scope,attr) ::= "this->get_state()->get_tokenStartCharPositionInLine()" |
| lexerRulePropertyRef_channel(scope,attr) ::= "this->get_state()->get_channel()" |
| lexerRulePropertyRef_start(scope,attr) ::= "this->get_state()->get_tokenStartCharIndex()" |
| lexerRulePropertyRef_stop(scope,attr) ::= "(this->getCharIndex()-1)" |
| lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer |
| lexerRulePropertyRef_int(scope,attr) ::= "(<name>ImplTraits::ConvertToInt32(<scope>->getText()))" |
| |
| |
| // setting $st and $tree is allowed in local rule. everything else is flagged as error |
| ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree=<expr>;" |
| ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st=<expr>;" |
| |
| |
| /** How to deal with an @after for C targets. Because we cannot rely on |
| * any garbage collection, after code is executed even in backtracking |
| * mode. Must be documented clearly. |
| */ |
| execAfter(action) ::= << |
| { |
| <action> |
| } |
| >> |
| |
| /** How to execute an action (when not backtracking) */ |
| execAction(action) ::= << |
| <if(backtracking)> |
| <if(actions.(actionScope).synpredgate)> |
| if ( <actions.(actionScope).synpredgate> ) |
| { |
| <action> |
| } |
| <else> |
| if ( BACKTRACKING == 0 ) |
| { |
| <action> |
| } |
| <endif> |
| <else> |
| { |
| <action> |
| } |
| <endif> |
| >> |
| |
| // M I S C (properties, etc...) |
| |
| bitsetDeclare(bitsetname, words64, traits) ::= << |
| |
| /** Bitset defining follow set for error recovery in rule state: <name> */ |
| static ANTLR_BITWORD <bitsetname>_bits[] = { <words64:{it |ANTLR_UINT64_LIT(<it>)}; separator=", "> }; |
| static <traits>::BitsetListType <bitsetname>( <bitsetname>_bits, <length(words64)> ); |
| >> |
| |
| codeFileExtension() ::= ".cpp" |
| |
| true_value() ::= "true" |
| false_value() ::= "false" |
| |