| /* |
| [The "BSD license"] |
| Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC |
| http://www.temporal-wave.com |
| http://www.linkedin.com/in/jimidle |
| |
| All rights reserved. |
| |
| Redistribution and use in source and binary forms, with or without |
| modification, are permitted provided that the following conditions |
| are met: |
| 1. Redistributions of source code must retain the above copyright |
| notice, this list of conditions and the following disclaimer. |
| 2. Redistributions in binary form must reproduce the above copyright |
| notice, this list of conditions and the following disclaimer in the |
| documentation and/or other materials provided with the distribution. |
| 3. The name of the author may not be used to endorse or promote products |
| derived from this software without specific prior written permission. |
| |
| THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| */ |
| |
| /* |
| * This code generating template and the associated C runtime was produced by: |
| * Jim Idle jimi|hereisanat|idle|dotgoeshere|ws. |
| * If it causes the destruction of the Universe, it will be pretty cool so long as |
| * I am in a different one at the time. |
| */ |
| // Map from C type name to the literal used to zero-initialize rule |
| // return values / attributes of that type in the generated recognizer. |
| cTypeInitMap ::= [ |
| "int" : "0", // Integers start out being 0 |
| "long" : "0", // Longs start out being 0 |
| "float" : "0.0", // Floats start out being 0.0 |
| "double" : "0.0", // Doubles start out being 0.0 |
| "ANTLR3_BOOLEAN" : "ANTLR3_FALSE", // Booleans start out being Antlr C for false |
| "byte" : "0", // Bytes start out being 0 |
| "short" : "0", // Shorts start out being 0 |
| "char" : "0" // Chars start out being 0 |
| ] |
| |
| // Shared file-header banner emitted at the top of every generated C file; |
| // <type> is e.g. "C source" or "C header". |
| // NOTE: the C block comment opened by "/** \file" is deliberately NOT |
| // closed here -- the callers (outputFile / headerFile) append further |
| // lines and emit the closing "*/" themselves after <leadIn(...)>. |
| leadIn(type) ::= |
| << |
| /** \file |
| * This <type> file was generated by $ANTLR version <ANTLRVersion> |
| * |
| * - From the grammar source file : <fileName> |
| * - On : <generatedTimestamp> |
| <if(LEXER)> |
| * - for the lexer : <name>Lexer |
| <endif> |
| <if(PARSER)> |
| * - for the parser : <name>Parser |
| <endif> |
| <if(TREE_PARSER)> |
| * - for the tree parser : <name>TreeParser |
| <endif> |
| * |
| * Editing it, at least manually, is not wise. |
| * |
| * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws. |
| * |
| * |
| >> |
| |
| /** The overall file structure of a recognizer; stores methods for rules |
| * and cyclic DFAs plus support code. |
| */ |
| outputFile( LEXER, |
| PARSER, |
| TREE_PARSER, |
| actionScope, |
| actions, |
| docComment, |
| recognizer, |
| name, |
| tokens, |
| tokenNames, |
| rules, |
| cyclicDFAs, |
| bitsets, |
| buildTemplate, |
| buildAST, |
| rewriteMode, |
| profile, |
| backtracking, |
| synpreds, |
| memoize, |
| numRules, |
| fileName, |
| ANTLRVersion, |
| generatedTimestamp, |
| trace, |
| scopes, |
| superClass, |
| literals |
| ) ::= |
| << |
| <leadIn("C source")> |
| */ |
| // [The "BSD license"] |
| // Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC |
| // http://www.temporal-wave.com |
| // http://www.linkedin.com/in/jimidle |
| // |
| // All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions |
| // are met: |
| // 1. Redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer. |
| // 2. Redistributions in binary form must reproduce the above copyright |
| // notice, this list of conditions and the following disclaimer in the |
| // documentation and/or other materials provided with the distribution. |
| // 3. The name of the author may not be used to endorse or promote products |
| // derived from this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| // IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| // NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| <if(actions.(actionScope).header)> |
| |
| /* ============================================================================= |
| * This is what the grammar programmer asked us to put at the top of every file. |
| */ |
| <actions.(actionScope).header> |
| /* End of Header action. |
| * ============================================================================= |
| */ |
| <endif> |
| |
| /* ----------------------------------------- |
| * Include the ANTLR3 generated header file. |
| */ |
| #include "<name>.h" |
| <actions.(actionScope).postinclude> |
| /* ----------------------------------------- */ |
| |
| <docComment> |
| |
| <if(literals)> |
| /** String literals used by <name> that we must do things like MATCHS() with. |
| * C will normally just lay down 8 bit characters, and you can use L"xxx" to |
| * get wchar_t, but wchar_t is 16 bits on Windows, which is not UTF32 and so |
| * we perform this little trick of defining the literals as arrays of UINT32 |
| * and passing in the address of these. |
| */ |
| <literals:{it | static ANTLR3_UCHAR lit_<i>[] = <it>;}; separator="\n"> |
| |
| <endif> |
| |
| |
| |
| |
| /* MACROS that hide the C interface implementations from the |
| * generated code, which makes it a little more understandable to the human eye. |
| * I am very much against using C pre-processor macros for function calls and bits |
| * of code as you cannot see what is happening when single stepping in debuggers |
| * and so on. The exception (in my book at least) is for generated code, where you are |
| * not maintaining it, but may wish to read and understand it. If you single step it, you know that input() |
| * hides some indirect calls, but is always referring to the input stream. This is |
| * probably more readable than ctx->input->istream->input(snarfle0->blarg) and allows me to rejig |
| * the runtime interfaces without changing the generated code too often, without |
| * confusing the reader of the generated output, who may not wish to know the gory |
| * details of the interface inheritance. |
| */ |
| |
| #define CTX ctx |
| |
| /* Aids in accessing scopes for grammar programmers |
| */ |
| #undef SCOPE_TYPE |
| #undef SCOPE_STACK |
| #undef SCOPE_TOP |
| #define SCOPE_TYPE(scope) p<name>_##scope##_SCOPE |
| #define SCOPE_STACK(scope) p<name>_##scope##Stack |
| #define SCOPE_TOP(scope) ctx->p<name>_##scope##Top |
| #define SCOPE_SIZE(scope) ctx->p<name>_##scope##Stack_limit |
| #define SCOPE_INSTANCE(scope, i) (ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope),i)) |
| |
| <if(LEXER)> |
| |
| /* Macros for accessing things in a lexer |
| */ |
| #undef LEXER |
| #undef RECOGNIZER |
| #undef RULEMEMO |
| #undef GETCHARINDEX |
| #undef GETLINE |
| #undef GETCHARPOSITIONINLINE |
| #undef EMIT |
| #undef EMITNEW |
| #undef MATCHC |
| #undef MATCHS |
| #undef MATCHRANGE |
| #undef LTOKEN |
| #undef HASFAILED |
| #undef FAILEDFLAG |
| #undef INPUT |
| #undef STRSTREAM |
| #undef LA |
| #undef HASEXCEPTION |
| #undef EXCEPTION |
| #undef CONSTRUCTEX |
| #undef CONSUME |
| #undef LRECOVER |
| #undef MARK |
| #undef REWIND |
| #undef REWINDLAST |
| #undef BACKTRACKING |
| #undef MATCHANY |
| #undef MEMOIZE |
| #undef HAVEPARSEDRULE |
| #undef GETTEXT |
| #undef INDEX |
| #undef SEEK |
| #undef PUSHSTREAM |
| #undef POPSTREAM |
| #undef SETTEXT |
| #undef SETTEXT8 |
| |
| #define LEXER ctx->pLexer |
| #define RECOGNIZER LEXER->rec |
| #define LEXSTATE RECOGNIZER->state |
| #define TOKSOURCE LEXSTATE->tokSource |
| #define GETCHARINDEX() LEXER->getCharIndex(LEXER) |
| #define GETLINE() LEXER->getLine(LEXER) |
| #define GETTEXT() LEXER->getText(LEXER) |
| #define GETCHARPOSITIONINLINE() LEXER->getCharPositionInLine(LEXER) |
| #define EMIT() LEXSTATE->type = _type; LEXER->emit(LEXER) |
| #define EMITNEW(t) LEXER->emitNew(LEXER, t) |
| #define MATCHC(c) LEXER->matchc(LEXER, c) |
| #define MATCHS(s) LEXER->matchs(LEXER, s) |
| #define MATCHRANGE(c1,c2) LEXER->matchRange(LEXER, c1, c2) |
| #define MATCHANY() LEXER->matchAny(LEXER) |
| #define LTOKEN LEXSTATE->token |
| #define HASFAILED() (LEXSTATE->failed == ANTLR3_TRUE) |
| #define BACKTRACKING LEXSTATE->backtracking |
| #define FAILEDFLAG LEXSTATE->failed |
| #define INPUT LEXER->input |
| #define STRSTREAM INPUT |
| #define ISTREAM INPUT->istream |
| #define INDEX() ISTREAM->index(ISTREAM) |
| #define SEEK(n) ISTREAM->seek(ISTREAM, n) |
| #define EOF_TOKEN &(LEXSTATE->tokSource->eofToken) |
| #define HASEXCEPTION() (LEXSTATE->error == ANTLR3_TRUE) |
| #define EXCEPTION LEXSTATE->exception |
| #define CONSTRUCTEX() RECOGNIZER->exConstruct(RECOGNIZER) |
| #define LRECOVER() LEXER->recover(LEXER) |
| #define MARK() ISTREAM->mark(ISTREAM) |
| #define REWIND(m) ISTREAM->rewind(ISTREAM, m) |
| #define REWINDLAST() ISTREAM->rewindLast(ISTREAM) |
| #define MEMOIZE(ri,si) RECOGNIZER->memoize(RECOGNIZER, ri, si) |
| #define HAVEPARSEDRULE(r) RECOGNIZER->alreadyParsedRule(RECOGNIZER, r) |
| #define PUSHSTREAM(str) LEXER->pushCharStream(LEXER, str) |
| #define POPSTREAM() LEXER->popCharStream(LEXER) |
| #define SETTEXT(str) LEXSTATE->text = str |
| #define SKIP() LEXSTATE->token = &(TOKSOURCE->skipToken) |
| #define USER1 LEXSTATE->user1 |
| #define USER2 LEXSTATE->user2 |
| #define USER3 LEXSTATE->user3 |
| #define CUSTOM LEXSTATE->custom |
| #define RULEMEMO LEXSTATE->ruleMemo |
| #define DBG RECOGNIZER->debugger |
| |
| /* If we have been told we can rely on the standard 8 bit or UTF16 input |
| * stream, then we can define our macros to use the direct pointers |
| * in the input object, which is much faster than indirect calls. This |
| * is really only significant to lexers with a lot of fragment rules (which |
| * do not place LA(1) in a temporary at the moment) and even then |
| * only if there is a lot of input (order of say 1M or so). |
| */ |
| #if defined(ANTLR3_INLINE_INPUT_8BIT) || defined(ANTLR3_INLINE_INPUT_UTF16) |
| |
| # ifdef ANTLR3_INLINE_INPUT_8BIT |
| |
| /* 8 bit character set */ |
| |
| # define NEXTCHAR ((pANTLR3_UINT8)(INPUT->nextChar)) |
| # define DATAP ((pANTLR3_UINT8)(INPUT->data)) |
| |
| # else |
| |
| # define NEXTCHAR ((pANTLR3_UINT16)(INPUT->nextChar)) |
| # define DATAP ((pANTLR3_UINT16)(INPUT->data)) |
| |
| # endif |
| |
| # define LA(n) ((NEXTCHAR + n) > (DATAP + INPUT->sizeBuf) ? ANTLR3_CHARSTREAM_EOF : (ANTLR3_UCHAR)(*(NEXTCHAR + n - 1))) |
| # define CONSUME() \\ |
| { \\ |
| if (NEXTCHAR \< (DATAP + INPUT->sizeBuf)) \\ |
| { \\ |
| INPUT->charPositionInLine++; \\ |
| if ((ANTLR3_UCHAR)(*NEXTCHAR) == INPUT->newlineChar) \\ |
| { \\ |
| INPUT->line++; \\ |
| INPUT->charPositionInLine = 0; \\ |
| INPUT->currentLine = (void *)(NEXTCHAR + 1); \\ |
| } \\ |
| INPUT->nextChar = (void *)(NEXTCHAR + 1); \\ |
| } \\ |
| } |
| |
| #else |
| |
| // Pick up the input character by calling the input stream implementation. |
| // |
| #define CONSUME() INPUT->istream->consume(INPUT->istream) |
| #define LA(n) INPUT->istream->_LA(INPUT->istream, n) |
| |
| #endif |
| <endif> |
| |
| <if(PARSER)> |
| /* Macros for accessing things in the parser |
| */ |
| |
| #undef PARSER |
| #undef RECOGNIZER |
| #undef HAVEPARSEDRULE |
| #undef MEMOIZE |
| #undef INPUT |
| #undef STRSTREAM |
| #undef HASEXCEPTION |
| #undef EXCEPTION |
| #undef MATCHT |
| #undef MATCHANYT |
| #undef FOLLOWSTACK |
| #undef FOLLOWPUSH |
| #undef FOLLOWPOP |
| #undef PRECOVER |
| #undef PREPORTERROR |
| #undef LA |
| #undef LT |
| #undef CONSTRUCTEX |
| #undef CONSUME |
| #undef MARK |
| #undef REWIND |
| #undef REWINDLAST |
| #undef PERRORRECOVERY |
| #undef HASFAILED |
| #undef FAILEDFLAG |
| #undef RECOVERFROMMISMATCHEDSET |
| #undef RECOVERFROMMISMATCHEDELEMENT |
| #undef INDEX |
| #undef ADAPTOR |
| #undef SEEK |
| #undef RULEMEMO |
| #undef DBG |
| |
| #define PARSER ctx->pParser |
| #define RECOGNIZER PARSER->rec |
| #define PSRSTATE RECOGNIZER->state |
| #define HAVEPARSEDRULE(r) RECOGNIZER->alreadyParsedRule(RECOGNIZER, r) |
| #define MEMOIZE(ri,si) RECOGNIZER->memoize(RECOGNIZER, ri, si) |
| #define INPUT PARSER->tstream |
| #define STRSTREAM INPUT |
| #define ISTREAM INPUT->istream |
| #define INDEX() ISTREAM->index(INPUT->istream) |
| #define HASEXCEPTION() (PSRSTATE->error == ANTLR3_TRUE) |
| #define EXCEPTION PSRSTATE->exception |
| #define MATCHT(t, fs) RECOGNIZER->match(RECOGNIZER, t, fs) |
| #define MATCHANYT() RECOGNIZER->matchAny(RECOGNIZER) |
| #define FOLLOWSTACK PSRSTATE->following |
| #ifdef SKIP_FOLLOW_SETS |
| #define FOLLOWPUSH(x) |
| #define FOLLOWPOP() |
| #else |
| #define FOLLOWPUSH(x) FOLLOWSTACK->push(FOLLOWSTACK, ((void *)(&(x))), NULL) |
| #define FOLLOWPOP() FOLLOWSTACK->pop(FOLLOWSTACK) |
| #endif |
| #define PRECOVER() RECOGNIZER->recover(RECOGNIZER) |
| #define PREPORTERROR() RECOGNIZER->reportError(RECOGNIZER) |
| #define LA(n) INPUT->istream->_LA(ISTREAM, n) |
| #define LT(n) INPUT->_LT(INPUT, n) |
| #define CONSTRUCTEX() RECOGNIZER->exConstruct(RECOGNIZER) |
| #define CONSUME() ISTREAM->consume(ISTREAM) |
| #define MARK() ISTREAM->mark(ISTREAM) |
| #define REWIND(m) ISTREAM->rewind(ISTREAM, m) |
| #define REWINDLAST() ISTREAM->rewindLast(ISTREAM) |
| #define SEEK(n) ISTREAM->seek(ISTREAM, n) |
| #define PERRORRECOVERY PSRSTATE->errorRecovery |
| #define FAILEDFLAG PSRSTATE->failed |
| #define HASFAILED() (FAILEDFLAG == ANTLR3_TRUE) |
| #define BACKTRACKING PSRSTATE->backtracking |
| #define RECOVERFROMMISMATCHEDSET(s) RECOGNIZER->recoverFromMismatchedSet(RECOGNIZER, s) |
| // Recover from a mismatched element 'e'; was previously expanding the undeclared name 's'. |
| #define RECOVERFROMMISMATCHEDELEMENT(e) RECOGNIZER->recoverFromMismatchedElement(RECOGNIZER, e) |
| #define ADAPTOR ctx->adaptor |
| #define RULEMEMO PSRSTATE->ruleMemo |
| #define DBG RECOGNIZER->debugger |
| |
| <endif> |
| |
| <if(TREE_PARSER)> |
| /* Macros for accessing things in the parser |
| */ |
| |
| #undef PARSER |
| #undef RECOGNIZER |
| #undef HAVEPARSEDRULE |
| #undef INPUT |
| #undef STRSTREAM |
| #undef HASEXCEPTION |
| #undef EXCEPTION |
| #undef MATCHT |
| #undef MATCHANYT |
| #undef FOLLOWSTACK |
| #undef FOLLOWPUSH |
| #undef FOLLOWPOP |
| #undef PRECOVER |
| #undef PREPORTERROR |
| #undef LA |
| #undef LT |
| #undef CONSTRUCTEX |
| #undef CONSUME |
| #undef MARK |
| #undef REWIND |
| #undef REWINDLAST |
| #undef PERRORRECOVERY |
| #undef HASFAILED |
| #undef FAILEDFLAG |
| #undef RECOVERFROMMISMATCHEDSET |
| #undef RECOVERFROMMISMATCHEDELEMENT |
| #undef BACKTRACKING |
| #undef ADAPTOR |
| #undef RULEMEMO |
| #undef SEEK |
| #undef INDEX |
| #undef DBG |
| |
| #define PARSER ctx->pTreeParser |
| #define RECOGNIZER PARSER->rec |
| #define PSRSTATE RECOGNIZER->state |
| #define HAVEPARSEDRULE(r) RECOGNIZER->alreadyParsedRule(RECOGNIZER, r) |
| #define INPUT PARSER->ctnstream |
| #define ISTREAM INPUT->tnstream->istream |
| #define STRSTREAM INPUT->tnstream |
| #define HASEXCEPTION() (PSRSTATE->error == ANTLR3_TRUE) |
| #define EXCEPTION PSRSTATE->exception |
| #define MATCHT(t, fs) RECOGNIZER->match(RECOGNIZER, t, fs) |
| #define MATCHANYT() RECOGNIZER->matchAny(RECOGNIZER) |
| #define FOLLOWSTACK PSRSTATE->following |
| #define FOLLOWPUSH(x) FOLLOWSTACK->push(FOLLOWSTACK, ((void *)(&(x))), NULL) |
| #define FOLLOWPOP() FOLLOWSTACK->pop(FOLLOWSTACK) |
| #define PRECOVER() RECOGNIZER->recover(RECOGNIZER) |
| #define PREPORTERROR() RECOGNIZER->reportError(RECOGNIZER) |
| #define LA(n) ISTREAM->_LA(ISTREAM, n) |
| #define LT(n) INPUT->tnstream->_LT(INPUT->tnstream, n) |
| #define CONSTRUCTEX() RECOGNIZER->exConstruct(RECOGNIZER) |
| #define CONSUME() ISTREAM->consume(ISTREAM) |
| #define MARK() ISTREAM->mark(ISTREAM) |
| #define REWIND(m) ISTREAM->rewind(ISTREAM, m) |
| #define REWINDLAST() ISTREAM->rewindLast(ISTREAM) |
| #define PERRORRECOVERY PSRSTATE->errorRecovery |
| #define FAILEDFLAG PSRSTATE->failed |
| #define HASFAILED() (FAILEDFLAG == ANTLR3_TRUE) |
| #define BACKTRACKING PSRSTATE->backtracking |
| #define RECOVERFROMMISMATCHEDSET(s) RECOGNIZER->recoverFromMismatchedSet(RECOGNIZER, s) |
| // Recover from a mismatched element 'e'; was previously expanding the undeclared name 's'. |
| #define RECOVERFROMMISMATCHEDELEMENT(e) RECOGNIZER->recoverFromMismatchedElement(RECOGNIZER, e) |
| #define ADAPTOR INPUT->adaptor |
| #define RULEMEMO PSRSTATE->ruleMemo |
| #define SEEK(n) ISTREAM->seek(ISTREAM, n) |
| #define INDEX() ISTREAM->index(ISTREAM) |
| #define DBG RECOGNIZER->debugger |
| |
| |
| <endif> |
| |
| #define TOKTEXT(tok, txt) tok, (pANTLR3_UINT8)txt |
| |
| /* The 4 tokens defined below may well clash with your own #defines or token types. If so |
| * then for the present you must use different names for your defines as these are hard coded |
| * in the code generator. It would be better not to use such names internally, and maybe |
| * we can change this in a forthcoming release. I deliberately do not #undef these |
| * here as this will at least give you a redefined error somewhere if they clash. |
| */ |
| #define UP ANTLR3_TOKEN_UP |
| #define DOWN ANTLR3_TOKEN_DOWN |
| #define EOR ANTLR3_TOKEN_EOR |
| #define INVALID ANTLR3_TOKEN_INVALID |
| |
| |
| /* ============================================================================= |
| * Functions to create and destroy scopes. First come the rule scopes, followed |
| * by the global declared scopes. |
| */ |
| |
| <rules: {r |<if(r.ruleDescriptor.ruleScope)> |
| <ruleAttributeScopeFuncDecl(scope=r.ruleDescriptor.ruleScope)> |
| <ruleAttributeScopeFuncs(scope=r.ruleDescriptor.ruleScope)> |
| <endif>}> |
| |
| <recognizer.scopes:{it | <if(it.isDynamicGlobalScope)> |
| <globalAttributeScopeFuncDecl(it)> |
| <globalAttributeScopeFuncs(it)> |
| <endif>}> |
| |
| /* ============================================================================= */ |
| |
| /* ============================================================================= |
| * Start of recognizer |
| */ |
| |
| <recognizer> |
| |
| /* End of code |
| * ============================================================================= |
| */ |
| |
| >> |
| // Generated header files use the conventional C ".h" extension. |
| headerFileExtension() ::= ".h" |
| |
| headerFile( LEXER, |
| PARSER, |
| TREE_PARSER, |
| actionScope, |
| actions, |
| docComment, |
| recognizer, |
| name, |
| tokens, |
| tokenNames, |
| rules, |
| cyclicDFAs, |
| bitsets, |
| buildTemplate, |
| buildAST, |
| rewriteMode, |
| profile, |
| backtracking, |
| synpreds, |
| memoize, |
| numRules, |
| fileName, |
| ANTLRVersion, |
| generatedTimestamp, |
| trace, |
| scopes, |
| superClass, |
| literals |
| ) ::= |
| << |
| <leadIn("C header")> |
| <if(PARSER)> |
| * The parser <mainName()> |
| <endif> |
| <if(LEXER)> |
| * The lexer <mainName()> |
| <endif> |
| <if(TREE_PARSER)> |
| * The tree parser <mainName()> |
| <endif> |
| has the callable functions (rules) shown below, |
| * which will invoke the code for the associated rule in the source grammar |
| * assuming that the input stream is pointing to a token/text stream that could begin |
| * this rule. |
| * |
| * For instance if you call the first (topmost) rule in a parser grammar, you will |
| * get the results of a full parse, but calling a rule half way through the grammar will |
| * allow you to pass part of a full token stream to the parser, such as for syntax checking |
| * in editors and so on. |
| * |
| * The parser entry points are called indirectly (by function pointer to function) via |
| * a parser context typedef p<name>, which is returned from a call to <name>New(). |
| * |
| <if(LEXER)> |
| * As this is a generated lexer, it is unlikely you will call it 'manually'. However |
| * the methods are provided anyway. |
| * |
| <endif> |
| * The methods in p<name> are as follows: |
| * |
| * <rules:{r | <if(!r.ruleDescriptor.isSynPred)> - <headerReturnType(ruleDescriptor=r.ruleDescriptor,...)> p<name>-><r.ruleDescriptor.name>(p<name>)<endif>}; separator="\n * "> |
| * |
| * The return type for any particular rule is of course determined by the source |
| * grammar file. |
| */ |
| // [The "BSD license"] |
| // Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC |
| // http://www.temporal-wave.com |
| // http://www.linkedin.com/in/jimidle |
| // |
| // All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions |
| // are met: |
| // 1. Redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer. |
| // 2. Redistributions in binary form must reproduce the above copyright |
| // notice, this list of conditions and the following disclaimer in the |
| // documentation and/or other materials provided with the distribution. |
| // 3. The name of the author may not be used to endorse or promote products |
| // derived from this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| // IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| // NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| #ifndef _<name>_H |
| #define _<name>_H |
| <actions.(actionScope).preincludes> |
| /* ============================================================================= |
| * Standard antlr3 C runtime definitions |
| */ |
| #include \<antlr3.h> |
| |
| /* End of standard antlr 3 runtime definitions |
| * ============================================================================= |
| */ |
| <actions.(actionScope).includes> |
| |
| #ifdef __cplusplus |
| extern "C" { |
| #endif |
| |
| // Forward declare the context typedef so that we can use it before it is |
| // properly defined. Delegators and delegates (from import statements) are |
| // interdependent and their context structures contain pointers to each other |
| // C only allows such things to be declared if you pre-declare the typedef. |
| // |
| typedef struct <name>_Ctx_struct <name>, * p<name>; |
| |
| <if(recognizer.grammar.delegates)> |
| // Include delegate definition header files |
| // |
| <recognizer.grammar.delegates: {g|#include \<<g.recognizerName>.h>}; separator="\n"> |
| |
| <endif> |
| |
| |
| <actions.(actionScope).header> |
| |
| #ifdef ANTLR3_WINDOWS |
| // Disable: Unreferenced parameter, - Rules with parameters that are not used |
| // constant conditional, - ANTLR realizes that a prediction is always true (synpred usually) |
| // initialized but unused variable - tree rewrite variables declared but not needed |
| // Unreferenced local variable - lexer rule declares but does not always use _type |
| // potentially uninitialized variable used - retval always returned from a rule |
| // unreferenced local function has been removed - usually getTokenNames or freeScope, they can go without warnings |
| // |
| // These are only really displayed at warning level /W4 but that is the code ideal I am aiming at |
| // and the codegen must generate some of these warnings by necessity, apart from 4100, which is |
| // usually generated when a parser rule is given a parameter that it does not use. Mostly though |
| // this is a matter of orthogonality hence I disable that one. |
| // |
| #pragma warning( disable : 4100 ) |
| #pragma warning( disable : 4101 ) |
| #pragma warning( disable : 4127 ) |
| #pragma warning( disable : 4189 ) |
| #pragma warning( disable : 4505 ) |
| #pragma warning( disable : 4701 ) |
| #endif |
| <if(backtracking)> |
| |
| /* ======================== |
| * BACKTRACKING IS ENABLED |
| * ======================== |
| */ |
| <endif> |
| |
| <rules:{r |<headerReturnScope(ruleDescriptor=r.ruleDescriptor,...)>}> |
| |
| <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeDecl(it)><endif>}> |
| <rules:{r |<ruleAttributeScopeDecl(scope=r.ruleDescriptor.ruleScope)>}> |
| <if(recognizer.grammar.delegators)> |
| // Include delegator definition header files |
| // |
| <recognizer.grammar.delegators: {g|#include \<<g.recognizerName>.h>}; separator="\n"> |
| |
| <endif> |
| |
| /** Context tracking structure for <mainName()> |
| */ |
| struct <name>_Ctx_struct |
| { |
| /** Built in ANTLR3 context tracker contains all the generic elements |
| * required for context tracking. |
| */ |
| <if(PARSER)> |
| pANTLR3_PARSER pParser; |
| <endif> |
| <if(LEXER)> |
| pANTLR3_LEXER pLexer; |
| <endif> |
| <if(TREE_PARSER)> |
| pANTLR3_TREE_PARSER pTreeParser; |
| <endif> |
| |
| <if(recognizer.grammar.delegates)> |
| <recognizer.grammar.delegates: |
| {g|p<g.recognizerName> <g:delegateName()>;}; separator="\n"> |
| <endif> |
| <if(recognizer.grammar.delegators)> |
| <recognizer.grammar.delegators: |
| {g|p<g.recognizerName> <g:delegateName()>;}; separator="\n"> |
| <endif> |
| <scopes:{it | <if(it.isDynamicGlobalScope)> |
| <globalAttributeScopeDef(it)> |
| <endif>}; separator="\n\n"> |
| <rules: {r |<if(r.ruleDescriptor.ruleScope)> |
| <ruleAttributeScopeDef(scope=r.ruleDescriptor.ruleScope)> |
| <endif>}> |
| |
| <if(LEXER)> |
| <rules:{r | <if(!r.ruleDescriptor.isSynPred)><headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*m<r.ruleDescriptor.name>) (struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);<endif>}; separator="\n"> |
| <endif> |
| <if(!LEXER)> |
| <rules:{r | <headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*<r.ruleDescriptor.name>) (struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n"> |
| <! generate rule/method definitions for imported rules so they |
| appear to be defined in this recognizer. !> |
| // Delegated rules |
| <recognizer.grammar.delegatedRules:{ruleDescriptor| |
| <headerReturnType(ruleDescriptor)> (*<ruleDescriptor.name>)(struct <name>_Ctx_struct * ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n"> |
| <endif> |
| |
| const char * (*getGrammarFileName)(); |
| void (*reset) (struct <name>_Ctx_struct * ctx); |
| void (*free) (struct <name>_Ctx_struct * ctx); |
| <@members> |
| <@end> |
| <actions.(actionScope).context> |
| }; |
| |
| // Function prototypes for the constructor functions that external translation units |
| // such as delegators and delegates may wish to call. |
| // |
| ANTLR3_API p<name> <name>New (<inputType()> instream<recognizer.grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>); |
| ANTLR3_API p<name> <name>NewSSD (<inputType()> instream, pANTLR3_RECOGNIZER_SHARED_STATE state<recognizer.grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>); |
| <if(!recognizer.grammar.grammarIsRoot)> |
| extern pANTLR3_UINT8 <recognizer.grammar.composite.rootGrammar.recognizerName>TokenNames[]; |
| <endif> |
| |
| |
| /** Symbolic definitions of all the tokens that the <grammarType()> will work with. |
| * \{ |
| * |
| * Antlr will define EOF, but we can't use that as it is too common |
| * in C header files and that would be confusing. There is no way to filter this out at the moment |
| * so we just undef it here for now. That isn't the value we get back from C recognizers |
| * anyway. We are looking for ANTLR3_TOKEN_EOF. |
| */ |
| #ifdef EOF |
| #undef EOF |
| #endif |
| #ifdef Tokens |
| #undef Tokens |
| #endif |
| <tokens:{it | #define <it.name> <it.type>}; separator="\n"> |
| #ifdef EOF |
| #undef EOF |
| #define EOF ANTLR3_TOKEN_EOF |
| #endif |
| |
| #ifndef TOKENSOURCE |
| #define TOKENSOURCE(lxr) lxr->pLexer->rec->state->tokSource |
| #endif |
| |
| /* End of token definitions for <name> |
| * ============================================================================= |
| */ |
| /** \} */ |
| |
| #ifdef __cplusplus |
| } |
| #endif |
| |
| #endif |
| |
| /* END - Note:Keep extra line feed to satisfy UNIX systems */ |
| |
| >> |
| |
| // Type of the input stream that the generated <name>New()/<name>NewSSD() |
| // constructors accept, selected by the kind of recognizer being generated. |
| inputType() ::=<< |
| <if(LEXER)> |
| pANTLR3_INPUT_STREAM |
| <endif> |
| <if(PARSER)> |
| pANTLR3_COMMON_TOKEN_STREAM |
| <endif> |
| <if(TREE_PARSER)> |
| pANTLR3_COMMON_TREE_NODE_STREAM |
| <endif> |
| >> |
| |
| // Human-readable name of the recognizer kind, used in generated doc comments. |
| grammarType() ::= << |
| <if(PARSER)> |
| parser |
| <endif> |
| <if(LEXER)> |
| lexer |
| <endif> |
| <if(TREE_PARSER)> |
| tree parser |
| <endif> |
| >> |
| |
| // Primary name of the recognizer. Currently <name> for all three kinds; |
| // kept as a conditional template for symmetry with grammarType/inputType. |
| mainName() ::= << |
| <if(PARSER)> |
| <name> |
| <endif> |
| <if(LEXER)> |
| <name> |
| <endif> |
| <if(TREE_PARSER)> |
| <name> |
| <endif> |
| >> |
| |
| // Header files declare rule return scopes exactly as the source does; delegate. |
| headerReturnScope(ruleDescriptor) ::= "<returnScope(...)>" |
| |
| // Return type of a rule as declared in the generated header: lexer rules |
| // return void, except syntactic predicates which use the standard return |
| // type; parser/tree-parser rules always use the standard return type. |
| headerReturnType(ruleDescriptor) ::= << |
| <if(LEXER)> |
| <if(!ruleDescriptor.isSynPred)> |
| void |
| <else> |
| <returnType()> |
| <endif> |
| <else> |
| <returnType()> |
| <endif> |
| >> |
| |
| // Produce the lexer output |
| // |
| lexer( grammar, |
| name, |
| tokens, |
| scopes, |
| rules, |
| numRules, |
| filterMode, |
| superClass, |
| labelType="pANTLR3_COMMON_TOKEN") ::= << |
| |
| <if(filterMode)> |
| /* Forward declare implementation function for ANTLR3_TOKEN_SOURCE interface when |
| * this is a filter mode lexer. |
| */ |
| static pANTLR3_COMMON_TOKEN <name>NextToken (pANTLR3_TOKEN_SOURCE toksource); |
| |
| /* Override the normal MEMOIZE and HAVEALREADYPARSED macros as this is a filtering |
| * lexer. In filter mode, the memoizing and backtracking are gated at BACKTRACKING > 1 rather |
| * than just BACKTRACKING. In some cases this might generate code akin to: |
| * if (BACKTRACKING) if (BACKTRACKING > 1) memoize. |
| * However, I assume that the C compilers/optimizers are smart enough to work this one out |
| * these days - Jim |
| */ |
| #undef MEMOIZE |
| // Note: the call inside the braces must be a complete C statement (';' terminated). |
| #define MEMOIZE(ri,si) if (BACKTRACKING>1) { RECOGNIZER->memoize(RECOGNIZER, ri, si); } |
| #undef HAVEPARSEDRULE |
| #define HAVEPARSEDRULE(r) if (BACKTRACKING>1) { RECOGNIZER->alreadyParsedRule(RECOGNIZER, r); } |
| <endif> |
| |
| /* Forward declare the locally static matching functions we have generated and any predicate functions. |
| */ |
| <rules:{r | static ANTLR3_INLINE <headerReturnType(ruleDescriptor=r.ruleDescriptor)> <if(!r.ruleDescriptor.isSynPred)>m<endif><r.ruleDescriptor.name> (p<name> ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n"> |
| static void <name>Free(p<name> ctx); |
| |
| /* ========================================================================= |
| * Lexer matching rules end. |
| * ========================================================================= |
| */ |
| |
| <scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}> |
| |
| <actions.lexer.members> |
| |
| static void |
| <name>Free (p<name> ctx) |
| { |
| <if(memoize)> |
| if (RULEMEMO != NULL) |
| { |
| RULEMEMO->free(RULEMEMO); |
| RULEMEMO = NULL; |
| } |
| <endif> |
| <if(grammar.directDelegates)> |
| // Free the lexers that we delegated to |
| // functions to. NULL the state so we only free it once. |
| // |
| <grammar.directDelegates: |
| {g|ctx-><g:delegateName()>->pLexer->rec->state = NULL; |
| ctx-><g:delegateName()>->free(ctx-><g:delegateName()>);}; separator="\n"> |
| <endif> |
| LEXER->free(LEXER); |
| |
| ANTLR3_FREE(ctx); |
| } |
| |
| static void |
| <name>Reset (p<name> ctx) |
| { |
| RECOGNIZER->reset(RECOGNIZER); |
| } |
| |
| /** \brief Name of the grammar file that generated this code |
| */ |
| static const char fileName[] = "<fileName>"; |
| |
| /** \brief Return the name of the grammar file that generated this code. |
| */ |
| static const char * getGrammarFileName() |
| { |
| return fileName; |
| } |
| |
| <if(filterMode)> |
| <filteringNextToken()> |
| <endif> |
| |
| /** \brief Create a new lexer called <name> |
| * |
| * \param[in] instream Pointer to an initialized input stream |
| * \return |
| * - Success p<name> initialized for the lex start |
| * - Fail NULL |
| */ |
| ANTLR3_API p<name> <name>New |
| (<inputType()> instream<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>) |
| { |
| // See if we can create a new lexer with the standard constructor |
| // |
| return <name>NewSSD(instream, NULL<grammar.delegators:{g|, <g:delegateName()>}>); |
| } |
| |
| /** \brief Create a new lexer called <name> |
| * |
| * \param[in] instream Pointer to an initialized input stream |
| * \param[state] state Previously created shared recognizer stat |
| * \return |
| * - Success p<name> initialized for the lex start |
| * - Fail NULL |
| */ |
| ANTLR3_API p<name> <name>NewSSD |
| (pANTLR3_INPUT_STREAM instream, pANTLR3_RECOGNIZER_SHARED_STATE state<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>) |
| { |
| p<name> ctx; // Context structure we will build and return |
| |
| ctx = (p<name>) ANTLR3_CALLOC(1, sizeof(<name>)); |
| |
| if (ctx == NULL) |
| { |
| // Failed to allocate memory for lexer context |
| return NULL; |
| } |
| |
| /* ------------------------------------------------------------------- |
| * Memory for basic structure is allocated, now to fill in |
| * in base ANTLR3 structures. We initialize the function pointers |
| * for the standard ANTLR3 lexer function set, but upon return |
| * from here, the programmer may set the pointers to provide custom |
| * implementations of each function. |
| * |
| * We don't use the macros defined in <name>.h here so you can get a sense |
| * of what goes where. |
| */ |
| |
| /* Create a base lexer, using the supplied input stream |
| */ |
| ctx->pLexer = antlr3LexerNewStream(ANTLR3_SIZE_HINT, instream, state); |
| |
| /* Check that we allocated the memory correctly |
| */ |
| if (ctx->pLexer == NULL) |
| { |
| ANTLR3_FREE(ctx); |
| return NULL; |
| } |
| <if(memoize)> |
| <if(grammar.grammarIsRoot)> |
| // Create a LIST for recording rule memos. |
| // |
| ctx->pLexer->rec->ruleMemo = antlr3IntTrieNew(15); /* 16 bit depth is enough for 32768 rules! */ |
| <endif> |
| <endif> |
| |
| /* Install the implementation of our <name> interface |
| */ |
| <rules:{r | <if(!r.ruleDescriptor.isSynPred)>ctx->m<r.ruleDescriptor.name> = m<r.ruleDescriptor.name>;<endif>}; separator="\n"> |
| |
| /** When the nextToken() call is made to this lexer's pANTLR3_TOKEN_SOURCE |
| * it will call mTokens() in this generated code, and will pass it the ctx |
| * pointer of this lexer, not the context of the base lexer, so store that now. |
| */ |
| ctx->pLexer->ctx = ctx; |
| |
| /**Install the token matching function |
| */ |
| ctx->pLexer->mTokens = (void (*) (void *))(mTokens); |
| |
| ctx->getGrammarFileName = getGrammarFileName; |
| ctx->free = <name>Free; |
| ctx->reset = <name>Reset; |
| |
| <if(grammar.directDelegates)> |
| // Initialize the lexers that we are going to delegate some |
| // functions to. |
| // |
| <grammar.directDelegates: |
| {g|ctx-><g:delegateName()> = <g.recognizerName>NewSSD(instream, ctx->pLexer->rec->state, ctx<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n"> |
| <endif> |
| <if(grammar.delegators)> |
| // Install the pointers back to lexers that will delegate us to perform certain functions |
| // for them. |
| // |
| <grammar.delegators: |
| {g|ctx-><g:delegateName()> = <g:delegateName()>;}; separator="\n"> |
| <endif> |
| <if(filterMode)> |
| /* We have filter mode turned on, so install the filtering nextToken function |
| */ |
| ctx->pLexer->rec->state->tokSource->nextToken = <name>NextToken; |
| <endif> |
| <actions.lexer.apifuncs> |
| |
| /* Return the newly built lexer to the caller |
| */ |
| return ctx; |
| } |
| <if(cyclicDFAs)> |
| |
| /* ========================================================================= |
| * DFA tables for the lexer |
| */ |
| <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !> |
| /* ========================================================================= |
| * End of DFA tables for the lexer |
| */ |
| <endif> |
| |
| /* ========================================================================= |
| * Functions to match the lexer grammar defined tokens from the input stream |
| */ |
| |
| <rules; separator="\n\n"> |
| |
| /* ========================================================================= |
| * Lexer matching rules end. |
| * ========================================================================= |
| */ |
| <if(synpreds)> |
| |
| /* ========================================================================= |
| * Lexer syntactic predicates |
| */ |
| <synpreds:{p | <lexerSynpred(predname=p)>}> |
| /* ========================================================================= |
| * Lexer syntactic predicates end. |
| * ========================================================================= |
| */ |
| <endif> |
| |
| /* End of Lexer code |
| * ================================================ |
| * ================================================ |
| */ |
| |
| >> |
| |
| |
// Emits the replacement nextToken() used by filter-mode lexers: it repeatedly
// tries mTokens() with BACKTRACKING==1; on failure it rewinds, consumes one
// character and retries, and at end-of-input it returns the shared EOF token.
// Installed on the token source by the lexer() template when filterMode is set.
filteringNextToken() ::= <<
/** An override of the lexer's nextToken() method that backtracks over mTokens() looking
 * for matches in lexer filterMode. No error can be generated upon error; just rewind, consume
 * a token and then try again. BACKTRACKING needs to be set as well.
 * Make rule memoization happen only at levels above 1 as we start mTokens
 * at BACKTRACKING==1.
 */
static pANTLR3_COMMON_TOKEN
<name>NextToken(pANTLR3_TOKEN_SOURCE toksource)
{
    pANTLR3_LEXER lexer;
    pANTLR3_RECOGNIZER_SHARED_STATE state;

    lexer = (pANTLR3_LEXER)(toksource->super);
    state = lexer->rec->state;

    /* Get rid of any previous token (token factory takes care of
     * any deallocation when this token is finally used up.
     */
    state ->token = NULL;
    state ->error = ANTLR3_FALSE; /* Start out without an exception */
    state ->failed = ANTLR3_FALSE;

    /* Record the start of the token in our input stream.
     */
    state->tokenStartCharIndex = lexer->input->istream->index(lexer->input->istream);
    state->tokenStartCharPositionInLine = lexer->input->getCharPositionInLine(lexer->input);
    state->tokenStartLine = lexer->input->getLine(lexer->input);
    state->text = NULL;

    /* Now call the matching rules and see if we can generate a new token
     */
    for (;;)
    {
        if (lexer->input->istream->_LA(lexer->input->istream, 1) == ANTLR3_CHARSTREAM_EOF)
        {
            /* Reached the end of the stream, nothing more to do.
             */
            pANTLR3_COMMON_TOKEN teof = &(toksource->eofToken);

            teof->setStartIndex (teof, lexer->getCharIndex(lexer));
            teof->setStopIndex (teof, lexer->getCharIndex(lexer));
            teof->setLine (teof, lexer->getLine(lexer));
            return teof;
        }

        state->token = NULL;
        state->error = ANTLR3_FALSE; /* Start out without an exception */

        {
            ANTLR3_MARKER m;

            m = lexer->input->istream->mark(lexer->input->istream);
            state->backtracking = 1; /* No exceptions */
            state->failed = ANTLR3_FALSE;

            /* Call the generated lexer, see if it can get a new token together.
             */
            lexer->mTokens(lexer->ctx);
            state->backtracking = 0;

            <! mTokens backtracks with synpred at BACKTRACKING==2
               and we set the synpredgate to allow actions at level 1. !>

            if (state->failed == ANTLR3_TRUE)
            {
                lexer->input->istream->rewind(lexer->input->istream, m);
                lexer->input->istream->consume(lexer->input->istream); <! advance one char and try again !>
            }
            else
            {
                lexer->emit(lexer); /* Assemble the token and emit it to the stream */
                return state->token;
            }
        }
    }
}
>>
| |
/** Condition guarding embedded actions in normal (non-filter) recognizers:
 *  actions only run when not backtracking. */
actionGate() ::= "BACKTRACKING==0"
| |
/** Condition guarding actions in a filter-mode lexer, whose base backtracking
 *  level is 1 (see filteringNextToken), so actions fire at exactly that level. */
filteringActionGate() ::= "BACKTRACKING==1"
| |
/** How to generate a parser */
//
// Shared body for both parser() and treeParser(): emits the token-name table
// (root grammars only), forward declarations, the freeScope helper, the
// <name>New / <name>NewSSD constructors, Reset/Free, getTokenNames, the
// FOLLOW bitsets, any cyclic-DFA tables, the rule functions themselves, and
// the delegated-rule/synpred trampolines. The concrete input stream type and
// rewrite element type are supplied by the calling template.
genericParser( grammar,
        name,
        scopes,
        tokens,
        tokenNames,
        rules,
        numRules,
        bitsets,
        inputStreamType,
        superClass,
        labelType,
        members,
        rewriteElementType, filterMode,
        ASTLabelType="pANTLR3_BASE_TREE"
        ) ::= <<


<if(grammar.grammarIsRoot)>
/** \brief Table of all token names in symbolic order, mainly used for
 * error reporting.
 */
pANTLR3_UINT8 <name>TokenNames[<length(tokenNames)>+4]
     = {
        (pANTLR3_UINT8) "\<invalid>", /* String to print to indicate an invalid token */
        (pANTLR3_UINT8) "\<EOR>",
        (pANTLR3_UINT8) "\<DOWN>",
        (pANTLR3_UINT8) "\<UP>",
        <tokenNames:{it |(pANTLR3_UINT8) <it>}; separator=",\n">
};
<endif>

<@members>

<@end>
<rules:{r |<ruleAttributeScopeFuncMacro(scope=r.ruleDescriptor.ruleScope)>}>
<scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScopeFuncMacro(it)><endif>}>

// Forward declare the locally static matching functions we have generated.
//
<rules:{r | static <headerReturnType(ruleDescriptor=r.ruleDescriptor)> <r.ruleDescriptor.name> (p<name> ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
static void <name>Free(p<name> ctx);
static void <name>Reset (p<name> ctx);

<if(!LEXER)>
<! generate rule/method definitions for imported rules so they
   appear to be defined in this recognizer. !>
<if(recognizer.grammar.delegatedRules)>
// Delegated rules
//
<recognizer.grammar.delegatedRules:{ruleDescriptor|static <headerReturnType(ruleDescriptor)> <ruleDescriptor.name>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">

<endif>
<endif>

/* For use in tree output where we are accumulating rule labels via label += ruleRef
 * we need a function that knows how to free a return scope when the list is destroyed.
 * We cannot just use ANTLR3_FREE because in debug tracking mode, this is a macro.
 */
static void ANTLR3_CDECL freeScope(void * scope)
{
    ANTLR3_FREE(scope);
}

/** \brief Name of the grammar file that generated this code
 */
static const char fileName[] = "<fileName>";

/** \brief Return the name of the grammar file that generated this code.
 */
static const char * getGrammarFileName()
{
    return fileName;
}
/** \brief Create a new <name> parser and return a context for it.
 *
 * \param[in] instream Pointer to an input stream interface.
 *
 * \return Pointer to new parser context upon success.
 */
ANTLR3_API p<name>
<name>New (<inputStreamType> instream<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
{
    // See if we can create a new parser with the standard constructor
    //
    return <name>NewSSD(instream, NULL<grammar.delegators:{g|, <g:delegateName()>}>);
}

/** \brief Create a new <name> parser and return a context for it.
 *
 * \param[in] instream Pointer to an input stream interface.
 *
 * \return Pointer to new parser context upon success.
 */
ANTLR3_API p<name>
<name>NewSSD (<inputStreamType> instream, pANTLR3_RECOGNIZER_SHARED_STATE state<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
{
    p<name> ctx; /* Context structure we will build and return */

    ctx = (p<name>) ANTLR3_CALLOC(1, sizeof(<name>));

    if (ctx == NULL)
    {
        // Failed to allocate memory for parser context
        //
        return NULL;
    }

    /* -------------------------------------------------------------------
     * Memory for basic structure is allocated, now to fill in
     * the base ANTLR3 structures. We initialize the function pointers
     * for the standard ANTLR3 parser function set, but upon return
     * from here, the programmer may set the pointers to provide custom
     * implementations of each function.
     *
     * We don't use the macros defined in <name>.h here, in order that you can get a sense
     * of what goes where.
     */

<if(PARSER)>
    /* Create a base parser/recognizer, using the supplied token stream
     */
    ctx->pParser = antlr3ParserNewStream(ANTLR3_SIZE_HINT, instream->tstream, state);
<endif>
<if(TREE_PARSER)>
    /* Create a base Tree parser/recognizer, using the supplied tree node stream
     */
    ctx->pTreeParser = antlr3TreeParserNewStream(ANTLR3_SIZE_HINT, instream, state);
<endif>

    /* Install the implementation of our <name> interface
     */
    <rules:{r | ctx-><r.ruleDescriptor.name> = <r.ruleDescriptor.name>;}; separator="\n">
<if(grammar.delegatedRules)>
    // Install the delegated methods so that they appear to be a part of this
    // parser
    //
    <grammar.delegatedRules:{ruleDescriptor | ctx-><ruleDescriptor.name> = <ruleDescriptor.name>;}; separator="\n">
<endif>

    ctx->free = <name>Free;
    ctx->reset = <name>Reset;
    ctx->getGrammarFileName = getGrammarFileName;

    /* Install the scope pushing methods.
     */
    <rules: {r |<if(r.ruleDescriptor.ruleScope)>
    <ruleAttributeScope(scope=r.ruleDescriptor.ruleScope)><\n>
    <endif>}>
    <recognizer.scopes:{it |<if(it.isDynamicGlobalScope)>
    <globalAttributeScope(it)><\n>
    <endif>}>
    <@apifuncs>

    <@end>
<if(grammar.directDelegates)>
    // Initialize the parsers that we are going to delegate some
    // functions to.
    //
    <grammar.directDelegates:
        {g|ctx-><g:delegateName()> = <g.recognizerName>NewSSD(instream, PSRSTATE, ctx<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
<endif>
<if(grammar.delegators)>
    // Install the pointers back to parsers that will delegate us to perform certain functions
    // for them.
    //
    <grammar.delegators:
        {g|ctx-><g:delegateName()> = <g:delegateName()>;}; separator="\n">
<endif>
    <actions.parser.apifuncs>
    <actions.treeparser.apifuncs>
<if(memoize)>
<if(grammar.grammarIsRoot)>
    /* Create a LIST for recording rule memos.
     */
    RULEMEMO = antlr3IntTrieNew(15); /* 16 bit depth is enough for 32768 rules! */<\n>
<endif>
<endif>
    /* Install the token table
     */
    PSRSTATE->tokenNames = <grammar.composite.rootGrammar.recognizerName>TokenNames;

    <@debugStuff()>

    /* Return the newly built parser to the caller
     */
    return ctx;
}

static void
<name>Reset (p<name> ctx)
{
    RECOGNIZER->reset(RECOGNIZER);
}

/** Free the parser resources
 */
static void
<name>Free(p<name> ctx)
{
    /* Free any scope memory
     */
    <rules: {r |<if(r.ruleDescriptor.ruleScope)><ruleAttributeScopeFree(scope=r.ruleDescriptor.ruleScope)><\n><endif>}>
    <recognizer.scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScopeFree(it)><\n><endif>}>

    <@cleanup>
    <@end>
<if(grammar.directDelegates)>
    // Free the parsers that we delegated to
    // functions to.NULL the state so we only free it once.
    //
    <grammar.directDelegates:
        {g| ctx-><g:delegateName()>-><if(TREE_PARSER)>pTreeParser<else>pParser<endif>->rec->state = NULL;
        ctx-><g:delegateName()>->free(ctx-><g:delegateName()>);}; separator="\n">
<endif>
<if(memoize)>
<if(grammar.grammarIsRoot)>
    if (RULEMEMO != NULL)
    {
        RULEMEMO->free(RULEMEMO);
        RULEMEMO = NULL;
    }
<endif>
<endif>
    // Free this parser
    //
<if(TREE_PARSER)>
    ctx->pTreeParser->free(ctx->pTreeParser);<\n>
<else>
    ctx->pParser->free(ctx->pParser);<\n>
<endif>

    ANTLR3_FREE(ctx);

    /* Everything is released, so we can return
     */
    return;
}

/** Return token names used by this <grammarType()>
 *
 * The returned pointer is used as an index into the token names table (using the token
 * number as the index).
 *
 * \return Pointer to first char * in the table.
 */
static pANTLR3_UINT8 *getTokenNames()
{
    return <grammar.composite.rootGrammar.recognizerName>TokenNames;
}

<members>

/* Declare the bitsets
 */
<bitsets:{it | <bitsetDeclare(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
                    words64=it.bits)>}>


<if(cyclicDFAs)>

/* =========================================================================
 * DFA tables for the parser
 */
<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
/* =========================================================================
 * End of DFA tables for the parser
 */
<endif>

/* ==============================================
 * Parsing rules
 */
<rules; separator="\n\n">
<if(grammar.delegatedRules)>
// Delegated methods that appear to be a part of this
// parser
//
<grammar.delegatedRules:{ruleDescriptor|
    <returnType()> <ruleDescriptor.name>(p<name> ctx<if(ruleDescriptor.parameterScope.attributes)>, <endif><ruleDescriptor.parameterScope:parameterScope()>)
    {
        <if(ruleDescriptor.hasReturnValue)>return <endif> ctx-><ruleDescriptor.grammar:delegateName()>-><ruleDescriptor.name>(ctx-><ruleDescriptor.grammar:delegateName()><if(ruleDescriptor.parameterScope.attributes)>, <endif><ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">);
    \}}; separator="\n">

<endif>
/* End of parsing rules
 * ==============================================
 */

/* ==============================================
 * Syntactic predicates
 */
<synpreds:{p | <synpred(predname=p)>}>
/* End of syntactic predicates
 * ==============================================
 */





>>
| |
/** How to generate a token-stream parser: delegates to genericParser with a
 *  pANTLR3_COMMON_TOKEN_STREAM input and TOKEN rewrite elements.
 */
parser( grammar,
        name,
        scopes,
        tokens,
        tokenNames,
        rules,
        numRules,
        bitsets,
        ASTLabelType,
        superClass="Parser",
        labelType="pANTLR3_COMMON_TOKEN",
        members={<actions.parser.members>}
        ) ::= <<
<genericParser(inputStreamType="pANTLR3_COMMON_TOKEN_STREAM", rewriteElementType="TOKEN", ...)>
>>
| |
/** How to generate a tree parser; same as parser except the input
 * stream is a different type (a tree node stream) and the rewrite
 * elements are NODEs rather than TOKENs.
 */
treeParser( grammar,
        name,
        scopes,
        tokens,
        tokenNames,
        globalAction,
        rules,
        numRules,
        bitsets,
        filterMode,
        labelType={<ASTLabelType>},
        ASTLabelType="pANTLR3_BASE_TREE",
        superClass="TreeParser",
        members={<actions.treeparser.members>}
        ) ::= <<
<genericParser(inputStreamType="pANTLR3_COMMON_TREE_NODE_STREAM", rewriteElementType="NODE", ...)>
>>
| |
/** A simpler version of a rule template that is specific to the imaginary
 * rules created for syntactic predicates. As they never have return values
 * nor parameters etc..., just give simplest possible method. Don't do
 * any of the normal memoization stuff in here either; it's a waste.
 * As predicates cannot be inlined into the invoking rule, they need to
 * be in a rule by themselves.
 *
 * The emitted _fragment function is invoked only by the matching synpred()
 * validation wrapper below.
 */
synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
<<
// $ANTLR start <ruleName>
static void <ruleName>_fragment(p<name> ctx <ruleDescriptor.parameterScope:parameterScope()>)
{
    <ruleLabelDefs()>
    <ruleLabelInitializations()>
<if(trace)>
    ANTLR3_PRINTF("enter <ruleName> %d failed = %d, backtracking = %d\\n",LT(1),failed,BACKTRACKING);
    <block>
    ANTLR3_PRINTF("exit <ruleName> %d, failed = %d, backtracking = %d\\n",LT(1),failed,BACKTRACKING);

<else>
    <block>
<endif>
<ruleCleanUp()>
}
// $ANTLR end <ruleName>
>>
| |
/** Emit the boolean validation function for a syntactic predicate: bump the
 *  backtracking level, run the <predname>_fragment rule, note whether it
 *  failed, then rewind the input and clear the failed flag so the caller can
 *  branch on the returned success value.
 */
synpred(predname) ::= <<
static ANTLR3_BOOLEAN <predname>(p<name> ctx)
{
    ANTLR3_MARKER start;
    ANTLR3_BOOLEAN success;

    BACKTRACKING++;
    <@start()>
    start = MARK();
    <predname>_fragment(ctx); // can never throw exception
    success = !(FAILEDFLAG);
    REWIND(start);
    <@stop()>
    BACKTRACKING--;
    FAILEDFLAG = ANTLR3_FALSE;
    return success;
}<\n>
>>
| |
/** Lexer syntactic predicates are generated exactly like parser ones. */
lexerSynpred(predname) ::= <<
<synpred(predname)>
>>
| |
/** Early-exit check emitted at the top of a rule when memoization is on:
 *  if this rule already failed/succeeded at this input position during
 *  backtracking, run the after-actions/finally and return immediately.
 *  NOTE(review): HAVEPARSEDRULE is used here as a boolean expression, but the
 *  filter-mode lexer template redefines it as an `if` statement — confirm the
 *  two features are never combined.
 */
ruleMemoization(rname) ::= <<
<if(memoize)>
if ( (BACKTRACKING>0) && (HAVEPARSEDRULE(<ruleDescriptor.index>)) )
{
<if(ruleDescriptor.hasMultipleReturnValues)>
<if(!ruleDescriptor.isSynPred)>
    retval.start = 0;<\n>
<endif>
<endif>
    <(ruleDescriptor.actions.after):execAfter()>
    <finalCode(finalBlock=finally)>
<if(!ruleDescriptor.isSynPred)>
    <scopeClean()><\n>
<endif>
    return <ruleReturnValue()>;
}
<endif>
>>
| |
/** How to test for failure and return from rule.
 *  Emitted after each match: jump to the rule's cleanup label on a real
 *  exception; when backtracking, bail out of the rule quietly (popping any
 *  dynamic scopes first) so the synpred wrapper can observe the failure.
 */
checkRuleBacktrackFailure() ::= <<
if (HASEXCEPTION())
{
    goto rule<ruleDescriptor.name>Ex;
}
<if(backtracking)>
if (HASFAILED())
{
    <scopeClean()>
    <@debugClean()>
    return <ruleReturnValue()>;
}
<endif>
>>
| |
/** This rule has failed, exit indicating failure during backtrack.
 *  Sets the shared failed flag (instead of reporting an error) and returns,
 *  popping any dynamic scopes the rule pushed.
 */
ruleBacktrackFailure() ::= <<
<if(backtracking)>
if (BACKTRACKING>0)
{
    FAILEDFLAG = <true_value()>;
    <scopeClean()>
    return <ruleReturnValue()>;
}
<endif>
>>
| |
/** How to generate code for a rule. This includes any return type
 * data aggregates required for multiple return values.
 *
 * Emits the static C function for one parser/tree-parser rule: declarations,
 * label setup, memoization check, the rule body, exception handling (either
 * the user-declared catch actions or the default report/recover sequence),
 * then memoize/finally/scope cleanup and the return of the rule value.
 *
 * Fix: the trace printf calls previously had more conversion specifiers than
 * arguments ("%s failed=%d, backtracking=%d" with only two arguments, and
 * "%s" used for integer flags with an undeclared `failed` identifier); the
 * format strings now match the argument lists and use the FAILEDFLAG macro.
 */
rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
/**
 * $ANTLR start <ruleName>
 * <fileName>:<description>
 */
static <returnType()>
<ruleName>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>)
{
    <if(trace)>ANTLR3_PRINTF("enter <ruleName> %p failed=%d, backtracking=%d\n", LT(1), FAILEDFLAG, BACKTRACKING);<endif>
    <ruleDeclarations()>
    <ruleDescriptor.actions.declarations>
    <ruleLabelDefs()>
    <ruleInitializations()>
    <ruleDescriptor.actions.init>
    <ruleMemoization(rname=ruleName)>
    <ruleLabelInitializations()>
    <@preamble()>
    {
        <block>
    }

    <ruleCleanUp()>
<if(exceptions)>
    if (HASEXCEPTION())
    {
        <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
    }
    else
    {
        <(ruleDescriptor.actions.after):execAfter()>
    }
<else>
    <if(!emptyRule)>
    <if(actions.(actionScope).rulecatch)>
    <actions.(actionScope).rulecatch>
    <else>
    if (HASEXCEPTION())
    {
        PREPORTERROR();
        PRECOVER();
        <@setErrorReturnValue()>
    }
    <if(ruleDescriptor.actions.after)>
    else
    {
        <(ruleDescriptor.actions.after):execAfter()>
    }<\n>
    <endif>
    <endif>
    <endif>
<endif>

    <if(trace)>ANTLR3_PRINTF("exit <ruleName> %p failed=%d backtracking=%d\n", LT(1), FAILEDFLAG, BACKTRACKING);<endif>
    <memoize()>
<if(finally)>
    <finalCode(finalBlock=finally)>
<endif>
    <scopeClean()>
    <@postamble()>
    return <ruleReturnValue()>;
}
/* $ANTLR end <ruleName> */
>>
| |
/** Wrap a rule's user-supplied finally block in its own C scope so its
 *  declarations cannot clash with the rule body's.
 */
finalCode(finalBlock) ::= <<
{
    <finalBlock>
}

>>
| |
/** Emit one user-specified exception-handler action for a rule.
 *  Invoked from rule() as <catch(decl=e.decl,action=e.action)>.
 *  Fix: reference the declared 'action' parameter directly instead of
 *  reaching into the caller's loop variable 'e' via dynamic attribute
 *  scoping, so the template works regardless of the caller's iterator name.
 */
catch(decl,action) ::= <<
/* catch(decl,action)
*/
{
    <action>
}

>>
| |
/** Declare the rule's return storage: a single aggregate `retval` when the
 *  rule has multiple return values, otherwise one local per declared return
 *  attribute; plus the memoization start-index variable when memoizing.
 */
ruleDeclarations() ::= <<
<if(ruleDescriptor.hasMultipleReturnValues)>
<returnType()> retval;<\n>
<else>
<ruleDescriptor.returnScope.attributes:{ a |
<a.type> <a.name>;
}>
<endif>
<if(memoize)>
ANTLR3_UINT32 <ruleDescriptor.name>_StartIndex;
<endif>
>>
| |
/** Initialize the return attributes declared by ruleDeclarations(), record
 *  the memoization start index, and push any dynamic scopes the rule uses
 *  or declares.
 */
ruleInitializations() ::= <<
/* Initialize rule variables
 */
<if(ruleDescriptor.hasMultipleReturnValues)>
<ruleDescriptor.returnScope.attributes:{ a |
<if(a.initValue)>retval.<a.name> = <a.initValue>;<endif>
}>
<else>
<ruleDescriptor.returnScope.attributes:{ a |
<if(a.initValue)><a.name> = <a.initValue>;<endif>
}>
<endif>
<if(memoize)>
<ruleDescriptor.name>_StartIndex = INDEX();<\n>
<endif>
<ruleDescriptor.useScopes:{it |<scopeTop(it)> = <scopePush(it)>;}; separator="\n">
<ruleDescriptor.ruleScope:{it |<scopeTop(it.name)> = <scopePush(it.name)>;}; separator="\n">
>>
| |
/** Declare C locals for a parser rule's labels: token labels, the vectors
 *  backing list labels (x+=...), and rule-reference labels.
 */
ruleLabelDefs() ::= <<
<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
    :{it |<labelType> <it.label.text>;}; separator="\n"
>
<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
    :{it |pANTLR3_VECTOR list_<it.label.text>;}; separator="\n"
>
<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
    :ruleLabelDef(); separator="\n"
>
>>
| |
/** NULL-initialize every label declared by ruleLabelDefs() and, for rules
 *  with an aggregate return value, seed retval.start/stop from the lookahead.
 */
ruleLabelInitializations() ::= <<
<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
    :{it |<it.label.text> = NULL;}; separator="\n"
>
<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
    :{it |list_<it.label.text> = NULL;}; separator="\n"
>
<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
    :ruleLabelInitVal(); separator="\n"
>
<if(ruleDescriptor.hasMultipleReturnValues)>
<if(!ruleDescriptor.isSynPred)>
retval.start = LT(1); retval.stop = retval.start;<\n>
<endif>
<endif>
>>
| |
/** Declare locals for a lexer rule's labels; char labels are plain UINT32s
 *  and list labels are backed by int tries rather than vectors.
 *  NOTE(review): ruleListLabels appears twice in the second list (here and in
 *  the init/free templates below) — if a lexer rule ever uses a rule list
 *  label this emits a duplicate declaration; confirm against the generator.
 */
lexerRuleLabelDefs() ::= <<
<[ruleDescriptor.tokenLabels,
  ruleDescriptor.tokenListLabels,
  ruleDescriptor.ruleLabels]
    :{it |<labelType> <it.label.text>;}; separator="\n"
>
<ruleDescriptor.charLabels:{it |ANTLR3_UINT32 <it.label.text>;}; separator="\n">
<[ruleDescriptor.tokenListLabels,
  ruleDescriptor.ruleListLabels,
  ruleDescriptor.ruleListLabels]
    :{it |pANTLR3_INT_TRIE list_<it.label.text>;}; separator="\n"
>
>>
| |
/** Initialize lexer rule labels: scalar labels to NULL, list labels to a
 *  freshly allocated int trie (released again by lexerRuleLabelFree()).
 */
lexerRuleLabelInit() ::= <<
<[ruleDescriptor.tokenLabels,
  ruleDescriptor.tokenListLabels,
  ruleDescriptor.ruleLabels]
    :{it |<it.label.text> = NULL;}; separator="\n"
>
<[ruleDescriptor.tokenListLabels,
  ruleDescriptor.ruleListLabels,
  ruleDescriptor.ruleListLabels]
    :{it |list_<it.label.text> = antlr3IntTrieNew(31);}; separator="\n"
>
>>
| |
/** Release lexer rule label resources at rule exit: NULL the scalar labels
 *  and free the int tries allocated by lexerRuleLabelInit().
 */
lexerRuleLabelFree() ::= <<
<[ruleDescriptor.tokenLabels,
  ruleDescriptor.tokenListLabels,
  ruleDescriptor.ruleLabels]
    :{it |<it.label.text> = NULL;}; separator="\n"
>
<[ruleDescriptor.tokenListLabels,
  ruleDescriptor.ruleListLabels,
  ruleDescriptor.ruleListLabels]
    :{it |list_<it.label.text>->free(list_<it.label.text>);}; separator="\n"
>
>>
| |
/** Expression returned by a rule function: nothing for synpred fragments,
 *  the single named attribute for single-value rules, otherwise the retval
 *  aggregate. (Single-line template: whitespace is suppressed by <%...%>.)
 */
ruleReturnValue() ::= <%
<if(!ruleDescriptor.isSynPred)>
<if(ruleDescriptor.hasReturnValue)>
<if(ruleDescriptor.hasSingleReturnValue)>
<ruleDescriptor.singleValueReturnName>
<else>
retval
<endif>
<endif>
<endif>
%>
| |
/** Record this rule's result in the memo table at rule exit, but only while
 *  backtracking (pairs with the ruleMemoization() check at rule entry).
 */
memoize() ::= <<
<if(memoize)>
<if(backtracking)>
if ( BACKTRACKING>0 ) { MEMOIZE(<ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
<endif>
<endif>
>>
| |
/** Emit the common rule-exit label that checkRuleBacktrackFailure() jumps to,
 *  and fix up retval.stop for parser rules with an aggregate return value.
 *  The unconditional goto keeps compilers from warning about an unused label.
 */
ruleCleanUp() ::= <<

// This is where rules clean up and exit
//
goto rule<ruleDescriptor.name>Ex; /* Prevent compiler warnings */
rule<ruleDescriptor.name>Ex: ;
<if(ruleDescriptor.hasMultipleReturnValues)>
<if(!TREE_PARSER)>
<if(!ruleDescriptor.isSynPred)>
retval.stop = LT(-1);<\n>
<endif>
<endif>
<endif>
>>
| |
/** Pop every dynamic scope the rule pushed in ruleInitializations(); emitted
 *  on all exit paths (normal return, backtrack failure, memoized early-out).
 */
scopeClean() ::= <<
<ruleDescriptor.useScopes:{it |<scopePop(it)>}; separator="\n">
<ruleDescriptor.ruleScope:{it |<scopePop(it.name)>}; separator="\n">

>>
/** How to generate a rule in the lexer; naked blocks are used for
 *  fragment rules, which do not produce tokens.
 *
 *  Fixes: the trace "enter" line previously emitted Java
 *  (System.out.println with string concatenation) into the generated C
 *  source — it now mirrors the ANTLR3_FPRINTF form of the "exit" line.
 *  The end-of-rule memoization now invokes the memoize() template (as
 *  rule() does) instead of rendering the raw `memoize` attribute.
 */
lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
// Comes from: <block.description>
/** \brief Lexer rule generated by ANTLR3
 *
 * $ANTLR start <ruleName>
 *
 * Looks to match the characters the constitute the token <ruleName>
 * from the attached input stream.
 *
 *
 * \remark
 *  - lexer->error == ANTLR3_TRUE if an exception was thrown.
 */
static ANTLR3_INLINE
void m<ruleName>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>)
{
    ANTLR3_UINT32 _type;
    <ruleDeclarations()>
    <ruleDescriptor.actions.declarations>
    <lexerRuleLabelDefs()>
    <if(trace)>ANTLR3_FPRINTF(stderr, "enter <ruleName> '%c' line=%d:%d failed = %d, backtracking =%d\n",LA(1),GETLINE(),GETCHARPOSITIONINLINE(),failed,BACKTRACKING);<endif>

    <if(nakedBlock)>
    <ruleMemoization(rname=ruleName)>
    <lexerRuleLabelInit()>
    <ruleDescriptor.actions.init>

    <block><\n>
    <else>
    <ruleMemoization(rname=ruleName)>
    <lexerRuleLabelInit()>
    _type = <ruleName>;

    <ruleDescriptor.actions.init>

    <block>
    LEXSTATE->type = _type;
    <endif>
    <if(trace)> ANTLR3_FPRINTF(stderr, "exit <ruleName> '%c' line=%d:%d failed = %d, backtracking =%d\n",LA(1),GETLINE(),GETCHARPOSITIONINLINE(),failed,BACKTRACKING);<endif>
    <ruleCleanUp()>
    <lexerRuleLabelFree()>
    <(ruleDescriptor.actions.after):execAfter()>
    <memoize()>
}
// $ANTLR end <ruleName>
>>
| |
/** How to generate code for the implicitly-defined lexer grammar rule
 * that chooses between lexer rules.
 * The generated mTokens() is installed as the lexer's token matching entry
 * point by the lexer() constructor template above.
 */
tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
/** This is the entry point in to the lexer from an object that
 * wants to generate the next token, such as a pCOMMON_TOKEN_STREAM
 */
static void
mTokens(p<name> ctx)
{
    <block><\n>

    goto ruleTokensEx; /* Prevent compiler warnings */
ruleTokensEx: ;
}
>>
| |
| // S U B R U L E S |
| |
/** A (...) subrule with multiple alternatives: predict the alternative with
 *  <decision>, then switch to the matching altSwitchCase body.
 */
block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<

// <fileName>:<description>
{
    int alt<decisionNumber>=<maxAlt>;
    <decls>
    <@predecision()>
    <decision>
    <@postdecision()>
    <@prebranch()>
    switch (alt<decisionNumber>)
    {
        <alts:{a | <altSwitchCase(i,a)>}>
    }
    <@postbranch()>
}
>>
| |
/** A rule block with multiple alternatives — same predict-then-switch shape
 *  as block(), emitted at the top level of a rule body.
 */
ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
{
    // <fileName>:<description>

    ANTLR3_UINT32 alt<decisionNumber>;

    alt<decisionNumber>=<maxAlt>;

    <decls>
    <@predecision()>
    <decision>
    <@postdecision()>
    switch (alt<decisionNumber>)
    {
        <alts:{a | <altSwitchCase(i,a)>}>
    }
}
>>
| |
/** A rule block with exactly one alternative: no decision or switch needed. */
ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
// <fileName>:<description>
<decls>
<@prealt()>
<alts>
<@postalt()>
>>
| |
/** A special case of a (...) subrule with a single alternative: emitted
 *  inline with no decision or switch.
 */
blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
// <fileName>:<description>
<decls>
<@prealt()>
<alts>
<@postalt()>
>>
| |
/** A (..)+ block with 1 or more alternatives: loop until no alternative
 *  matches; if the loop exits with zero iterations, raise an early-exit
 *  exception (or fail the backtrack) since at least one match is required.
 */
positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
// <fileName>:<description>
{
    int cnt<decisionNumber>=0;
    <decls>
    <@preloop()>

    for (;;)
    {
        int alt<decisionNumber>=<maxAlt>;
        <@predecision()>
        <decision>
        <@postdecision()>
        switch (alt<decisionNumber>)
        {
            <alts:{a | <altSwitchCase(i,a)>}>
            default:

                if ( cnt<decisionNumber> >= 1 )
                {
                    goto loop<decisionNumber>;
                }
                <ruleBacktrackFailure()>
                <earlyExitEx()>
                <@earlyExitException()>
                goto rule<ruleDescriptor.name>Ex;
        }
        cnt<decisionNumber>++;
    }
    loop<decisionNumber>: ; /* Jump to here if this rule does not match */
    <@postloop()>
}
>>
| |
| earlyExitEx() ::= << |
| /* mismatchedSetEx() |
| */ |
| CONSTRUCTEX(); |
| EXCEPTION->type = ANTLR3_EARLY_EXIT_EXCEPTION; |
| EXCEPTION->name = (void *)ANTLR3_EARLY_EXIT_NAME; |
| <\n> |
| >> |
| positiveClosureBlockSingleAlt ::= positiveClosureBlock |
| |
/** A (..)* block with 1 or more alternatives */
closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<

// <fileName>:<description>
<decls>

<@preloop()>
for (;;)
{
int alt<decisionNumber>=<maxAlt>;
<@predecision()>
<decision>
<@postdecision()>
switch (alt<decisionNumber>)
{
<alts:{a | <altSwitchCase(i,a)>}>
default:
goto loop<decisionNumber>; /* break out of the loop */
break;
}
}
loop<decisionNumber>: ; /* Jump out to here if this rule does not match */
<@postloop()>
>>

closureBlockSingleAlt ::= closureBlock

/** Optional blocks (x)? are translated to (x|) by antlr before code generation
 *  so we can just use the normal block template
 */
optionalBlock ::= block

optionalBlockSingleAlt ::= block

/** A case in a switch that jumps to an alternative given the alternative
 *  number. A DFA predicts the alternative and then a simple switch
 *  does the jump to the code that actually matches that alternative.
 */
altSwitchCase(altNum,alt) ::= <<
case <altNum>:
    <@prealt()>
    <alt>
    break;<\n>
>>

/** An alternative is just a list of elements; at outermost level */
alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
// <fileName>:<description>
{
    <@declarations()>
    <@initializations()>
    <elements:element()>
    <rew>
    <@cleanup()>
}
>>
| |
// E L E M E N T S
/** What to emit when there is no rewrite. For auto build
 *  mode, does nothing.
 */
noRewrite(rewriteBlockLevel, treeLevel) ::= ""

/** Dump the elements one per line */
element(e) ::= <<
<@prematch()>
<e.el><\n>
>>

/** match a token optionally with a label in front */
tokenRef(token,label,elementIndex,terminalOptions) ::= <<
<if(label)><label> = (<labelType>)<endif> MATCHT(<token>, &FOLLOW_<token>_in_<ruleName><elementIndex>);
<checkRuleBacktrackFailure()>
>>

/** ids+=ID */
tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
<tokenRef(...)>
<listLabel(elem=label,...)>
>>

// listLabel: lazily creates the pANTLR3_VECTOR backing a += list label,
// then appends the matched element to it.
listLabel(label,elem) ::= <<
if (list_<label> == NULL)
{
    list_<label>=ctx->vectors->newVector(ctx->vectors);
}
list_<label>->add(list_<label>, <elem>, NULL);
>>


/** match a character */
charRef(char,label) ::= <<
<if(label)>
<label> = LA(1);<\n>
<endif>
MATCHC(<char>);
<checkRuleBacktrackFailure()>
>>

/** match a character range */
charRangeRef(a,b,label) ::= <<
<if(label)>
<label> = LA(1);<\n>
<endif>
MATCHRANGE(<a>, <b>);
<checkRuleBacktrackFailure()>
>>
| |
/** For now, sets are interval tests and must be tested inline */
matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
<if(label)>
<if(LEXER)>
<label>= LA(1);<\n>
<else>
<label>=(<labelType>)LT(1);<\n>
<endif>
<endif>
if ( <s> )
{
    CONSUME();
    <postmatchCode>
<if(!LEXER)>
    PERRORRECOVERY=ANTLR3_FALSE;
<endif>
    <if(backtracking)>FAILEDFLAG=ANTLR3_FALSE;<\n><endif>
}
else
{
    <ruleBacktrackFailure()>
    <mismatchedSetEx()>
    <@mismatchedSetException()>
<if(LEXER)>
    LRECOVER();
<else>
<! use following code to make it recover inline;
    RECOVERFROMMISMATCHEDSET(&FOLLOW_set_in_<ruleName><elementIndex>);
!>
<endif>
    goto rule<ruleDescriptor.name>Ex;
}<\n>
>>

// mismatchedSetEx: builds the MismatchedSetException raised when the
// lookahead is outside the interval test <s> above.
mismatchedSetEx() ::= <<
CONSTRUCTEX();
EXCEPTION->type         = ANTLR3_MISMATCHED_SET_EXCEPTION;
EXCEPTION->name         = (void *)ANTLR3_MISMATCHED_SET_NAME;
<if(PARSER)>
EXCEPTION->expectingSet = NULL;
<! use following code to make it recover inline;
EXCEPTION->expectingSet = &FOLLOW_set_in_<ruleName><elementIndex>;
!>
<endif>
>>

matchRuleBlockSet ::= matchSet

matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
<matchSet(...)>
<listLabel(elem=label,...)>
>>
| |
/** Match a string literal */
lexerStringRef(string,label,elementIndex) ::= <<
<if(label)>
<label>Start = GETCHARINDEX();
MATCHS(<string>);
<checkRuleBacktrackFailure()>
<label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
<label>->setType(<label>, ANTLR3_TOKEN_INVALID);
<label>->setStartIndex(<label>, <label>Start);
<label>->setStopIndex(<label>, GETCHARINDEX()-1);
<label>->input = INPUT->tnstream->istream;
<else>
MATCHS(<string>);
<checkRuleBacktrackFailure()><\n>
<endif>
>>

// wildcard: match any single token (the . operator) in a parser/tree walker.
wildcard(token,label,elementIndex,terminalOptions) ::= <<
<if(label)>
<label>=(<labelType>)LT(1);<\n>
<endif>
MATCHANYT();
<checkRuleBacktrackFailure()>
>>

wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
<wildcard(...)>
<listLabel(elem=label,...)>
>>

/** Match . wildcard in lexer */
wildcardChar(label, elementIndex) ::= <<
<if(label)>
<label> = LA(1);<\n>
<endif>
MATCHANY();
<checkRuleBacktrackFailure()>
>>

wildcardCharListLabel(label, elementIndex) ::= <<
<wildcardChar(...)>
<listLabel(elem=label,...)>
>>
| |
/** Match a rule reference by invoking it possibly with arguments
 *  and a return value or values. The 'rule' argument was the
 *  target rule name, but now is type Rule, whose toString is
 *  same: the rule name. Now though you can access full rule
 *  descriptor stuff.
 */
ruleRef(rule,label,elementIndex,args,scope) ::= <<
FOLLOWPUSH(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
<if(label)><label>=<endif><if(scope)>ctx-><scope:delegateName()>-><endif><rule.name>(ctx<if(scope)>-><scope:delegateName()><endif><if(args)>, <args; separator=", "><endif>);<\n>
FOLLOWPOP();
<checkRuleBacktrackFailure()>
>>

/** ids+=r */
ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
<ruleRef(...)>
<listLabel(elem=label,...)>
>>

/** A lexer rule reference
 *  The 'rule' argument was the target rule name, but now
 *  is type Rule, whose toString is same: the rule name.
 *  Now though you can access full rule descriptor stuff.
 */
lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
/* <description> */
<if(label)>
{
    ANTLR3_MARKER <label>Start<elementIndex> = GETCHARINDEX();
    <if(scope)>ctx-><scope:delegateName()>-><endif>m<rule.name>(ctx<if(scope)>-><scope:delegateName()><endif> <if(args)>, <endif><args; separator=", ">);
    <checkRuleBacktrackFailure()>
    <label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
    <label>->setType(<label>, ANTLR3_TOKEN_INVALID);
    <label>->setStartIndex(<label>, <label>Start<elementIndex>);
    <label>->setStopIndex(<label>, GETCHARINDEX()-1);
    <label>->input = INPUT;
}
<else>
<if(scope)>ctx-><scope:delegateName()>-><endif>m<rule.name>(ctx<if(scope)>-><scope:delegateName()><endif> <if(args)>, <endif><args; separator=", ">);
<checkRuleBacktrackFailure()>
<endif>
>>

/** i+=INT in lexer */
lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
<lexerRuleRef(...)>
<listLabel(elem=label,...)>
>>
| |
/** EOF in the lexer */
lexerMatchEOF(label,elementIndex) ::= <<
<if(label)>
{
    ANTLR3_UINT32 <label>Start<elementIndex>;
    <labelType> <label>;
    <label>Start<elementIndex> = GETCHARINDEX();
    MATCHC(ANTLR3_CHARSTREAM_EOF);
    <checkRuleBacktrackFailure()>
    <label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
    <label>->setType(<label>, ANTLR3_TOKEN_EOF);
    <label>->setStartIndex(<label>, <label>Start<elementIndex>);
    <label>->setStopIndex(<label>, GETCHARINDEX()-1);
    <label>->input = INPUT->tnstream->istream;
}
<else>
MATCHC(ANTLR3_CHARSTREAM_EOF);
<checkRuleBacktrackFailure()>
<endif>
>>

// used for left-recursive rules
recRuleDefArg() ::= "int <recRuleArg()>"
recRuleArg() ::= "_p"
recRuleAltPredicate(ruleName,opPrec) ::= "<recRuleArg()> \<= <opPrec>"
recRuleSetResultAction() ::= "root_0=$<ruleName>_primary.tree;"
recRuleSetReturnAction(src,name) ::= "$<name>=$<src>.<name>;"
| |
/** match ^(root children) in tree parser */
tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel, treeLevel) ::= <<
<root:element()>
<actionsAfterRoot:element()>
<if(nullableChildList)>
if ( LA(1)==ANTLR3_TOKEN_DOWN ) {
    MATCHT(ANTLR3_TOKEN_DOWN, NULL);
    <checkRuleBacktrackFailure()>
    <children:element()>
    MATCHT(ANTLR3_TOKEN_UP, NULL);
    <checkRuleBacktrackFailure()>
}
<else>
MATCHT(ANTLR3_TOKEN_DOWN, NULL);
<checkRuleBacktrackFailure()>
<children:element()>
MATCHT(ANTLR3_TOKEN_UP, NULL);
<checkRuleBacktrackFailure()>
<endif>
>>

/** Every predicate is used as a validating predicate (even when it is
 *  also hoisted into a prediction expression).
 */
validateSemanticPredicate(pred,description) ::= <<
if ( !(<evalPredicate(...)>) )
{
    <ruleBacktrackFailure()>
    <newFPE(...)>
}
>>

// newFPE: builds the FailedPredicateException raised when a validating
// semantic predicate evaluates false at parse time.
newFPE() ::= <<
CONSTRUCTEX();
EXCEPTION->type         = ANTLR3_FAILED_PREDICATE_EXCEPTION;
EXCEPTION->message      = (void *)"<description>";
EXCEPTION->ruleName     = (void *)"<ruleName>";
<\n>
>>
| |
// F i x e d  D F A  (if-then-else)

// dfaState: one state of a fixed-k DFA rendered as an if/else-if chain over
// LA(<k>); falls into the else to predict via EOT or raise NoViableAlt.
dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<

{
    int LA<decisionNumber>_<stateNumber> = LA(<k>);
    <edges; separator="\nelse ">
    else
    {
<if(eotPredictsAlt)>
        alt<decisionNumber>=<eotPredictsAlt>;
<else>
        <ruleBacktrackFailure()>

        <newNVException()>
        goto rule<ruleDescriptor.name>Ex;

<endif>
    }
}
>>

// newNVException: builds the NoViableAltException raised when no DFA edge
// matches the current lookahead.
newNVException() ::= <<
CONSTRUCTEX();
EXCEPTION->type         = ANTLR3_NO_VIABLE_ALT_EXCEPTION;
EXCEPTION->message      = (void *)"<description>";
EXCEPTION->decisionNum  = <decisionNumber>;
EXCEPTION->state        = <stateNumber>;
<@noViableAltException()>
<\n>
>>

/** Same as a normal DFA state except that we don't examine lookahead
 *  for the bypass alternative. It delays error detection but this
 *  is faster, smaller, and more what people expect. For (X)? people
 *  expect "if ( LA(1)==X ) match(X);" and that's it.
 */
dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
{
    int LA<decisionNumber>_<stateNumber> = LA(<k>);
    <edges; separator="\nelse ">
}
>>
| |
/** A DFA state that is actually the loopback decision of a closure
 *  loop. If end-of-token (EOT) predicts any of the targets then it
 *  should act like a default clause (i.e., no error can be generated).
 *  This is used only in the lexer so that for ('a')* on the end of a rule
 *  anything other than 'a' predicts exiting.
 */

dfaLoopbackStateDecls()::= <<
ANTLR3_UINT32   LA<decisionNumber>_<stateNumber>;
>>
dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
{
   /* dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState)
    */
    int LA<decisionNumber>_<stateNumber> = LA(<k>);
    <edges; separator="\nelse "><\n>
    <if(eotPredictsAlt)>
    <if(!edges)>
	alt<decisionNumber>=<eotPredictsAlt>;	<! if no edges, don't gen ELSE !>
	<else>
    else
    {
	alt<decisionNumber>=<eotPredictsAlt>;
    }<\n>
    <endif>
    <endif>
}
>>

/** An accept state indicates a unique alternative has been predicted */
dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"

/** A simple edge with an expression. If the expression is satisfied,
 *  enter to the target state. To handle gated productions, we may
 *  have to evaluate some predicates for this edge.
 */
dfaEdge(labelExpr, targetState, predicates) ::= <<
if ( <if(predicates)>(<predicates>) && <endif>(<labelExpr>))
{
    <targetState>
}
>>
| |
// F i x e d  D F A  (switch case)

/** A DFA state where a SWITCH may be generated. The code generator
 *  decides if this is possible: CodeGenerator.canGenerateSwitch().
 */
dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
switch ( LA(<k>) )
{
<edges; separator="\n">

default:
<if(eotPredictsAlt)>
    alt<decisionNumber>=<eotPredictsAlt>;
<else>
    <ruleBacktrackFailure()>
    <newNVException()>
    goto rule<ruleDescriptor.name>Ex;<\n>
<endif>
}<\n>
>>

// Switch form of dfaOptionalBlockState: no default clause, a non-matching
// token simply bypasses the optional block.
dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
switch ( LA(<k>) )
{
<edges; separator="\n">
}<\n>
>>

// Switch form of dfaLoopbackState: EOT (if present) predicts the exit
// alternative from the default clause.
dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
switch ( LA(<k>) )
{
<edges; separator="\n"><\n>
<if(eotPredictsAlt)>
default:
    alt<decisionNumber>=<eotPredictsAlt>;
    break;<\n>
<endif>
}<\n>
>>

// One case-label group of a DFA switch; all tokens in <labels> lead to the
// same target state.
dfaEdgeSwitch(labels, targetState) ::= <<
<labels:{it |case <it>:}; separator="\n">
	{
		<targetState>
	}
	break;
>>
| |
// C y c l i c  D F A

/** The code to initiate execution of a cyclic DFA; this is used
 *  in the rule to predict an alt just like the fixed DFA case.
 *  The <name> attribute is inherited via the parser, lexer, ...
 */
dfaDecision(decisionNumber,description) ::= <<
alt<decisionNumber> = cdfa<decisionNumber>.predict(ctx, RECOGNIZER, ISTREAM, &cdfa<decisionNumber>);
<checkRuleBacktrackFailure()>
>>
| |
/* Dump DFA tables as static initialized arrays of shorts(16 bits)/characters(8 bits)
 * which are then used to statically initialize the dfa structure, which means that there
 * is no runtime initialization whatsoever, other than anything the C compiler might
 * need to generate. In general the C compiler will lay out memory such that there is no
 * runtime code required.
 */
cyclicDFA(dfa) ::= <<
/** Static dfa state tables for Cyclic dfa:
 *  <dfa.description>
 */
static const ANTLR3_INT32 dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] =
{
    <dfa.eot; wrap="\n", separator=", ", null="-1">
};
static const ANTLR3_INT32 dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] =
{
    <dfa.eof; wrap="\n", separator=", ", null="-1">
};
static const ANTLR3_INT32 dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] =
{
    <dfa.min; wrap="\n", separator=", ", null="-1">
};
static const ANTLR3_INT32 dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] =
{
    <dfa.max; wrap="\n", separator=", ", null="-1">
};
static const ANTLR3_INT32 dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] =
{
    <dfa.accept; wrap="\n", separator=", ", null="-1">
};
static const ANTLR3_INT32 dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] =
{
    <dfa.special; wrap="\n", separator=", ", null="-1">
};

/** Used when there is no transition table entry for a particular state */
#define dfa<dfa.decisionNumber>_T_empty	    NULL

<dfa.edgeTransitionClassMap.keys:{ table |
static const ANTLR3_INT32 dfa<dfa.decisionNumber>_T<i0>[] =
{
    <table; separator=", ", wrap="\n", null="-1">
\};<\n>}; null = "">

/* Transition tables are a table of sub tables, with some tables
 * reused for efficiency.
 */
static const ANTLR3_INT32 * const dfa<dfa.decisionNumber>_transitions[] =
{
    <dfa.transitionEdgeTables:{xref|dfa<dfa.decisionNumber>_T<xref>}; separator=", ", wrap="\n", null="NULL">
};

<if(dfa.specialStateSTs)>
static ANTLR3_INT32 dfa<dfa.decisionNumber>_sst(p<name> ctx, pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, pANTLR3_CYCLIC_DFA dfa, ANTLR3_INT32 s)
{
    ANTLR3_INT32    _s;

    _s	    = s;
    switch  (s)
    {
    <dfa.specialStateSTs:{state |
    case <i0>:

	<state>}; separator="\n">
    }
<if(backtracking)>
    if (BACKTRACKING > 0)
    {
	FAILEDFLAG = ANTLR3_TRUE;
	return	-1;
    }
<endif>

    CONSTRUCTEX();
    EXCEPTION->type         = ANTLR3_NO_VIABLE_ALT_EXCEPTION;
    EXCEPTION->message      = (void *)"<dfa.description>";
    EXCEPTION->decisionNum  = <dfa.decisionNumber>;
    EXCEPTION->state        = _s;
    <@noViableAltException()>
    return -1;
}
<endif>

<@errorMethod()>

/* Declare tracking structure for Cyclic DFA <dfa.decisionNumber>
 */
static
ANTLR3_CYCLIC_DFA cdfa<dfa.decisionNumber>
    =	{
	    <dfa.decisionNumber>,		    /* Decision number of this dfa	    */
	    /* Which decision this represents:   */
	    (const pANTLR3_UCHAR)"<dfa.description>",
<if(dfa.specialStateSTs)>
	    (CDFA_SPECIAL_FUNC) dfa<dfa.decisionNumber>_sst,
<else>
	    (CDFA_SPECIAL_FUNC) antlr3dfaspecialStateTransition,	/* Default special state transition function	*/
<endif>

	    antlr3dfaspecialTransition,		/* DFA specialTransition is currently just a default function in the runtime */
	    antlr3dfapredict,			/* DFA simulator function is in the runtime */
	    dfa<dfa.decisionNumber>_eot,	    /* EOT table			    */
	    dfa<dfa.decisionNumber>_eof,	    /* EOF table			    */
	    dfa<dfa.decisionNumber>_min,	    /* Minimum tokens for each state    */
	    dfa<dfa.decisionNumber>_max,	    /* Maximum tokens for each state    */
	    dfa<dfa.decisionNumber>_accept,	/* Accept table			    */
	    dfa<dfa.decisionNumber>_special,	/* Special transition states	    */
	    dfa<dfa.decisionNumber>_transitions	/* Table of transition tables	    */

	};
/* End of Cyclic DFA <dfa.decisionNumber>
 * ---------------------
 */
>>
| |
/** A state in a cyclic DFA; it's a special state and part of a big switch on
 *  state.
 */
cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
{
    ANTLR3_UINT32 LA<decisionNumber>_<stateNumber>;<\n>
    ANTLR3_MARKER index<decisionNumber>_<stateNumber>;<\n>

	LA<decisionNumber>_<stateNumber> = LA(1);<\n>
    <if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
    index<decisionNumber>_<stateNumber> = INDEX();<\n>
    REWINDLAST();<\n>
    <endif>
    s = -1;
    <edges; separator="\nelse ">
	<if(semPredState)> <! return input cursor to state before we rewound !>
	SEEK(index<decisionNumber>_<stateNumber>);<\n>
	<endif>
    if ( s>=0 )
    {
	return s;
    }
}
break;
>>

/** Just like a fixed DFA edge, test the lookahead and indicate what
 *  state to jump to next if successful.
 */
cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
if ( <if(predicates)>(<predicates>) && <endif>(<labelExpr>) )
{
    s = <targetStateNumber>;
}<\n>
>>

/** An edge pointing at end-of-token; essentially matches any char;
 *  always jump to the target.
 */
eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
s = <targetStateNumber>;<\n>
>>
| |
| |
// D F A  E X P R E S S I O N S

// Boolean combinators over hoisted predicates, emitted as C expressions.
andPredicates(left,right) ::= "( (<left>) && (<right>) )"

orPredicates(operands) ::= "((<first(operands)>)<rest(operands):{o | ||(<o>)}>)"

notPredicate(pred) ::= "!( <evalPredicate(pred,{})> )"

evalPredicate(pred,description) ::= "(<pred>)"

// Syntactic predicate: invokes the generated backtracking probe function.
evalSynPredicate(pred,description) ::= "<pred>(ctx)"

lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"

/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
 *  somewhere. Must ask for the lookahead directly.
 */
isolatedLookaheadTest(atom,k,atomAsInt) ::= "LA(<k>) == <atom>"

lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
((LA<decisionNumber>_<stateNumber> >= <lower>) && (LA<decisionNumber>_<stateNumber> \<= <upper>))
%>

isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "((LA(<k>) >= <lower>) && (LA(<k>) \<= <upper>))"

setTest(ranges) ::= "<ranges; separator=\" || \">"
| |
// A T T R I B U T E S

makeScopeSet() ::= <<
/* makeScopeSet()
 */
/** Definition of the <scope.name> scope variable tracking
 *  structure. An instance of this structure is created by calling
 *  <name>_<scope.name>Push().
 */
typedef struct  <scopeStruct(sname=scope.name,...)>_struct
{
    /** Function that the user may provide to be called when the
     *  scope is destroyed (so you can free pANTLR3_HASH_TABLES and so on)
     *
     * \param Pointer to an instance of this typedef/struct
     */
    void    (ANTLR3_CDECL *free)	(struct <scopeStruct(sname=scope.name,...)>_struct * frame);

    /* =============================================================================
     * Programmer defined variables...
     */
    <scope.attributes:{it |<it.decl>;}; separator="\n">

    /* End of programmer defined variables
     * =============================================================================
     */
}
    <scopeStruct(sname=scope.name,...)>, * <scopeType(sname=scope.name,...)>;

>>

// Emits the scope struct typedef for a grammar-global scope, only when the
// scope actually declares attributes.
globalAttributeScopeDecl(scope) ::= <<
<if(scope.attributes)>
/* globalAttributeScopeDecl(scope)
 */
<makeScopeSet(...)>
<endif>
>>

// Emits the scope struct typedef for a rule-local scope, only when the
// scope actually declares attributes.
ruleAttributeScopeDecl(scope) ::= <<
<if(scope.attributes)>
/* ruleAttributeScopeDecl(scope)
 */
<makeScopeSet(...)>
<endif>
>>
| |
// Forward declarations for the push function and free callback of a
// grammar-global attribute scope.
globalAttributeScopeFuncDecl(scope) ::=
<<
/* globalAttributeScopeFuncDecl(scope)
 */
<if(scope.attributes)>
/* -----------------------------------------------------------------------------
 * Function declaration for creating a <name>_<scope.name> scope set
 */
static <scopeType(sname=scope.name,...)>   <scopePushName(sname=scope.name,...)>(p<name> ctx);
static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope);
/* ----------------------------------------------------------------------------- */

<endif>
>>
| |
// Emits the Pop function for a grammar-global attribute scope. Popping runs
// the user's optional free callback on the top entry, then just lowers the
// _limit watermark; entries are retained for reuse by the next Push.
// NOTE: the get() call previously referenced a non-existent <bscopeStack(...)>
// template, which would abort template rendering — it must be <scopeStack(...)>,
// exactly as in the parallel ruleAttributeScopeFuncMacro.
globalAttributeScopeFuncMacro(scope) ::= <<
<if(scope.attributes)>
/* globalAttributeScopeFuncMacro(scope)
 */
/** Function for popping the top value from a <scopeStack(sname=scope.name)>
 */
void
<scopePopName(sname=scope.name,...)>(p<name> ctx)
{
    // First see if the user defined a function they want to be called when a
    // scope is popped/freed.
    //
    // If the user supplied the scope entries with a free function, then call it first
    //
    if	(SCOPE_TOP(<scope.name>)->free != NULL)
    {
        SCOPE_TOP(<scope.name>)->free(SCOPE_TOP(<scope.name>));
    }

    // Now we decrement the scope's upper limit bound. We do not actually pop the scope as
    // we want to reuse scope entries if we do continuous push and pops. Most scopes don't
    // nest too far so we don't want to keep freeing and allocating them
    //
    ctx-><scopeStack(sname=scope.name,...)>_limit--;
    SCOPE_TOP(<scope.name>) = (<scopeType(sname=scope.name)>)(ctx-><scopeStack(sname=scope.name,...)>->get(ctx-><scopeStack(sname=scope.name,...)>, ctx-><scopeStack(sname=scope.name,...)>_limit - 1));
}
<endif>
>>
| |
// Forward declarations for the push function and free callback of a
// rule-local attribute scope.
ruleAttributeScopeFuncDecl(scope) ::= <<
<if(scope.attributes)>
/* ruleAttributeScopeFuncDecl(scope)
 */
/* -----------------------------------------------------------------------------
 * Function declarations for creating a <name>_<scope.name> scope set
 */
static <scopeType(sname=scope.name,...)>   <scopePushName(sname=scope.name,...)>(p<name> ctx);
static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope);
/* ----------------------------------------------------------------------------- */

<endif>
>>
| |
// Emits the Pop function for a rule-local attribute scope; identical in
// shape to globalAttributeScopeFuncMacro.
ruleAttributeScopeFuncMacro(scope) ::= <<
<if(scope.attributes)>
/* ruleAttributeScopeFuncMacro(scope)
 */
/** Function for popping the top value from a <scopeStack(sname=scope.name,...)>
 */
void
<scopePopName(sname=scope.name,...)>(p<name> ctx)
{
    // First see if the user defined a function they want to be called when a
    // scope is popped/freed.
    //
    // If the user supplied the scope entries with a free function,then call it first
    //
    if	(SCOPE_TOP(<scope.name>)->free != NULL)
    {
        SCOPE_TOP(<scope.name>)->free(SCOPE_TOP(<scope.name>));
    }

    // Now we decrement the scope's upper limit bound. We do not actually pop the scope as
    // we want to reuse scope entries if we do continuous push and pops. Most scopes don't
    // nest too far so we don't want to keep freeing and allocating them
    //
    ctx-><scopeStack(sname=scope.name,...)>_limit--;
    SCOPE_TOP(<scope.name>) = (<scopeType(sname=scope.name)>)(ctx-><scopeStack(sname=scope.name,...)>->get(ctx-><scopeStack(sname=scope.name,...)>, ctx-><scopeStack(sname=scope.name,...)>_limit - 1));
}

<endif>
>>
| |
// Context-struct members for a grammar-global scope: the stack, its limit
// watermark, the push-function pointer, and the cached top-of-stack pointer.
globalAttributeScopeDef(scope) ::=
<<
/* globalAttributeScopeDef(scope)
 */
<if(scope.attributes)>
/** Pointer to the  <scope.name> stack for use by <scopePushName(sname=scope.name)>()
 *  and <scopePopName(sname=scope.name,...)>()
 */
pANTLR3_STACK <scopeStack(sname=scope.name)>;
ANTLR3_UINT32 <scopeStack(sname=scope.name)>_limit;
/** Pointer to the top of the stack for the global scope <scopeStack(sname=scope.name)>
 */
<scopeType(sname=scope.name,...)>    (*<scopePushName(sname=scope.name,...)>)(struct <name>_Ctx_struct * ctx);
<scopeType(sname=scope.name,...)>    <scopeTopDecl(sname=scope.name,...)>;

<endif>
>>

// Context-struct members for a rule-local scope; same layout as the global
// case, minus the top-of-stack doc comment.
ruleAttributeScopeDef(scope) ::= <<
<if(scope.attributes)>
/* ruleAttributeScopeDef(scope)
 */
/** Pointer to the  <scope.name> stack for use by <scopePushName(sname=scope.name)>()
 *  and <scopePopName(sname=scope.name,...)>()
 */
pANTLR3_STACK <scopeStack(sname=scope.name,...)>;
ANTLR3_UINT32 <scopeStack(sname=scope.name,...)>_limit;
<scopeType(sname=scope.name,...)>    (*<scopePushName(sname=scope.name,...)>)(struct <name>_Ctx_struct * ctx);
<scopeType(sname=scope.name,...)>    <scopeTopDecl(sname=scope.name,...)>;

<endif>
>>
| |
// Thin wrappers delegating the Push/Free implementations to attributeFuncs().
globalAttributeScopeFuncs(scope) ::= <<
<if(scope.attributes)>
/* globalAttributeScopeFuncs(scope)
 */
<attributeFuncs(scope)>
<endif>
>>

ruleAttributeScopeFuncs(scope) ::= <<
<if(scope.attributes)>
/* ruleAttributeScopeFuncs(scope)
 */
<attributeFuncs(scope)>
<endif>
>>

// Constructor-time wiring of a global scope: install the push function,
// allocate the stack, zero the watermark, clear the cached top pointer.
globalAttributeScope(scope) ::= <<
<if(scope.attributes)>
/* globalAttributeScope(scope)
 */
ctx-><scopePushName(sname=scope.name,...)>     = <scopePushName(sname=scope.name,...)>;
ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(0);
ctx-><scopeStack(sname=scope.name,...)>_limit    = 0;
<scopeTop(sname=scope.name,...)>      = NULL;
<endif>
>>

// Constructor-time wiring of a rule-local scope; identical to the global case.
ruleAttributeScope(scope) ::=
<<
<if(scope.attributes)>
/* ruleAttributeScope(scope)
 */
ctx-><scopePushName(sname=scope.name,...)>     = <scopePushName(sname=scope.name,...)>;
ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(0);
ctx-><scopeStack(sname=scope.name,...)>_limit    = 0;
<scopeTop(sname=scope.name,...)>      = NULL;
<endif>
>>
// Destructor-time teardown: free the scope stack (which frees retained
// entries through the free fn registered at push time).
globalAttributeScopeFree(scope) ::= <<
<if(scope.attributes)>
/* globalAttributeScope(scope)
 */
ctx-><scopeStack(sname=scope.name,...)>->free(ctx-><scopeStack(sname=scope.name,...)>);
<endif>
>>

ruleAttributeScopeFree(scope) ::=
<<
<if(scope.attributes)>
/* ruleAttributeScope(scope)
 */
ctx-><scopeStack(sname=scope.name,...)>->free(ctx-><scopeStack(sname=scope.name,...)>);
<endif>
>>
| |
// Naming helpers: each expands to the identifier used in the generated C for
// one facet of a scope <sname> (top-of-stack member, pop/push functions,
// scope pointer type, struct tag, stack member). Centralizing the names here
// keeps declarations and uses consistent across all the scope templates.
scopeTopDecl(sname) ::= <<
p<name>_<sname>Top
>>

scopeTop(sname) ::= <<
ctx-><scopeTopDecl(sname=sname,...)>
>>

scopePop(sname) ::= <<
<scopePopName(sname=sname,...)>(ctx);
>>

scopePush(sname) ::= <<
p<name>_<sname>Push(ctx)
>>

scopePopName(sname) ::= <<
p<name>_<sname>Pop
>>

scopePushName(sname) ::= <<
p<name>_<sname>Push
>>

scopeType(sname) ::= <<
p<name>_<sname>_SCOPE
>>

scopeStruct(sname) ::= <<
<name>_<sname>_SCOPE
>>

scopeStack(sname) ::= <<
p<name>_<sname>Stack
>>
| |
// attributeFuncs: emits the default Free callback and the Push function for
// one scope. Push reuses a retained stack entry below the high-water mark
// when possible, otherwise allocates and pushes a fresh one.
attributeFuncs(scope) ::= <<
<if(scope.attributes)>
/* attributeFuncs(scope)
 */

static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope)
{
    ANTLR3_FREE(scope);
}

/** \brief Allocate initial memory for a <name> <scope.name> scope variable stack entry and
 *         add it to the top of the stack.
 *
 * \remark
 * By default the structure is freed with ANTLR3_FREE(), but you can use the
 * the \@init action to install a pointer to a custom free() routine by
 * adding the code:
 * \code
 *   <scopeTop(sname=scope.name)>->free = myroutine;
 * \endcode
 *
 * With lots of comments of course! The routine should be declared in
 * \@members { } as:
 * \code
 *   void ANTLR3_CDECL myfunc( <scopeType(sname=scope.name)> ptr).
 * \endcode
 *
 * It should perform any custom freeing stuff that you need (call ANTLR3_FREE, not free()
 * NB: It should not free the pointer it is given, which is the scope stack entry itself
 * and will be freed by the function that calls your custom free routine.
 *
 */
static <scopeType(sname=scope.name)>
<scopePushName(sname=scope.name)>(p<name> ctx)
{
    /* Pointer used to create a new set of attributes
     */
    <scopeType(sname=scope.name)>      newAttributes;

    /* Allocate the memory for a new structure if we need one.
     */
    if (ctx-><scopeStack(sname=scope.name)>->size(ctx-><scopeStack(sname=scope.name)>) > ctx-><scopeStack(sname=scope.name)>_limit)
    {
        // The current limit value was less than the number of scopes available on the stack so
        // we can just reuse one. Our limit tracks the stack count, so the index of the entry we want
        // is one less than that, or conveniently, the current value of limit.
        //
        newAttributes = (<scopeType(sname=scope.name)>)ctx-><scopeStack(sname=scope.name)>->get(ctx-><scopeStack(sname=scope.name)>, ctx-><scopeStack(sname=scope.name)>_limit);
    }
    else
    {
        // Need a new allocation
        //
        newAttributes = (<scopeType(sname=scope.name)>) ANTLR3_MALLOC(sizeof(<scopeStruct(sname=scope.name)>));
        if  (newAttributes != NULL)
        {
            /* Standard ANTLR3 library implementation
             */
            ctx-><scopeStack(sname=scope.name)>->push(ctx-><scopeStack(sname=scope.name)>, newAttributes, (void (*)(void *))<scope.name>Free);
        }
    }

    // Blank out any previous free pointer, the user might or might not install a new one.
    //
    newAttributes->free = NULL;

    // Indicate the position in the available stack that the current level is at
    //
    ctx-><scopeStack(sname=scope.name)>_limit++;

    /* Return value is the pointer to the new entry, which may be used locally
     * without de-referencing via the context.
     */
    return  newAttributes;
}<\n>

<endif>
>>
/** Name of the generated return-value struct for rule <r>: the rule
 *  name suffixed with "_return".
 */
returnStructName(r) ::= "<r.name>_return"
| |
/** Produce the C return type of the current rule. Syntactic predicates
 *  always return ANTLR3_BOOLEAN; rules with multiple return values use
 *  the generated \<recognizer>_\<rule>_return struct; rules with exactly
 *  one return value use that value's declared type; otherwise void.
 */
returnType() ::= <%
<if(!ruleDescriptor.isSynPred)>
<if(ruleDescriptor.hasMultipleReturnValues)>
<ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>
<else>
<if(ruleDescriptor.hasSingleReturnValue)>
<ruleDescriptor.singleValueReturnType>
<else>
void
<endif>
<endif>
<else>
ANTLR3_BOOLEAN
<endif>
%>
| |
/** Generate the C type associated with a single or multiple return
 *  value(s). Mirrors returnType(), but resolves against a referenced
 *  rule (the target of a rule label) rather than the current rule.
 */
ruleLabelType(referencedRule) ::= <%
<if(referencedRule.hasMultipleReturnValues)>
<referencedRule.grammar.recognizerName>_<referencedRule.name>_return
<else>
<if(referencedRule.hasSingleReturnValue)>
<referencedRule.singleValueReturnType>
<else>
void
<endif>
<endif>
%>
| |
/** Name used to refer to a delegate grammar: the explicit label if one
 *  was given, otherwise "g" prepended to the delegate's grammar name.
 */
delegateName(d) ::= <<
<if(d.label)><d.label><else>g<d.name><endif>
>>
| |
/** Using a type to init value map, try to init a type; if not in table
 *  must be an object, default value is "0". Note that the emitted text
 *  includes the leading "= " of the initializer.
 */
initValue(typeName) ::= <<
= <cTypeInitMap.(typeName)>
>>
| |
/** Define a rule label: declares a local of the referenced rule's
 *  return type, and (re)defines the RETURN_TYPE_\<label> macro so later
 *  templates can name the label's type without re-deriving it.
 */
ruleLabelDef(label) ::= <<
<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text>;
#undef RETURN_TYPE_<label.label.text>
#define RETURN_TYPE_<label.label.text> <ruleLabelType(referencedRule=label.referencedRule)><\n>
>>
/** Rule label default value — intentionally empty for the C target;
 *  rule labels are declared by ruleLabelDef without an initializer.
 */
ruleLabelInitVal(label) ::= <<
>>
| |
/** C type for AST nodes: the grammar's declared ASTLabelType option if
 *  set, otherwise the runtime default pANTLR3_BASE_TREE.
 */
ASTLabelType() ::= "<if(recognizer.ASTLabelType)><recognizer.ASTLabelType><else>pANTLR3_BASE_TREE<endif>"
| |
/** Define a return struct for a rule if the code needs to access its
 *  start/stop tokens, tree stuff, attributes, ... Leave a hole for
 *  subgroups to stick in members. Only emitted for non-synpred rules
 *  with multiple return values; start/stop are tokens for parsers and
 *  ASTLabelType nodes for tree parsers.
 */
returnScope(scope) ::= <<
<if(!ruleDescriptor.isSynPred)>
<if(ruleDescriptor.hasMultipleReturnValues)>
typedef struct <ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>_struct
{
    <if(!TREE_PARSER)>
    /** Generic return elements for ANTLR3 rules that are not in tree parsers or returning trees
     */
    pANTLR3_COMMON_TOKEN    start;
    pANTLR3_COMMON_TOKEN    stop;
    <else>
    <recognizer.ASTLabelType>	start;
    <recognizer.ASTLabelType>	stop;
    <endif>
    <@ruleReturnMembers()>
    <ruleDescriptor.returnScope.attributes:{it |<it.type> <it.name>;}; separator="\n">
}
    <ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>;<\n><\n>
<endif>
<endif>
>>
| |
/** Emit a rule's parameter declarations as a comma-separated list. */
parameterScope(scope) ::= <<
<scope.attributes:{it |<it.decl>}; separator=", ">
>>

/** Read a rule parameter: parameters are plain C locals, so just the name. */
parameterAttributeRef(attr) ::= "<attr.name>"
/** Assign to a rule parameter. */
parameterSetAttributeRef(attr,expr) ::= "<attr.name>=<expr>;"
| |
/** Note that the scopeAttributeRef does not have access to the
 *  grammar name directly. Three access forms: negIndex counts back
 *  from the top of the scope stack, index addresses an absolute stack
 *  slot, and the default reads the attribute from the top entry.
 */
scopeAttributeRef(scope,attr,index,negIndex) ::= <%
<if(negIndex)>
((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get( ctx->SCOPE_STACK(<scope>), ctx->SCOPE_STACK(<scope>)->size(ctx->SCOPE_STACK(<scope>)) - <negIndex> - 1) ))-><attr.name>
<else>
<if(index)>
((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get(ctx->SCOPE_STACK(<scope>), (ANTLR3_UINT32)<index> ) ))-><attr.name>
<else>
(SCOPE_TOP(<scope>))-><attr.name>
<endif>
<endif>
%>
| |
/** Assignment counterpart of scopeAttributeRef: same three addressing
 *  forms (negIndex from stack top, absolute index, or top of stack),
 *  but emits "attr = expr;" on the selected scope entry.
 */
scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
<if(negIndex)>
((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get( ctx->SCOPE_STACK(<scope>), ctx->SCOPE_STACK(<scope>)->size(ctx->SCOPE_STACK(<scope>)) - <negIndex> - 1) ))-><attr.name> = <expr>;
<else>
<if(index)>
((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get(ctx->SCOPE_STACK(<scope>), (ANTLR3_UINT32)<index> ) ))-><attr.name> = <expr>;
<else>
(SCOPE_TOP(<scope>))-><attr.name>=<expr>;
<endif>
<endif>
%>
| |
/** $x is either global scope or x is rule with dynamic scope; refers
 *  to stack itself not top of stack. This is useful for predicates
 *  like {$function.size()>0 && $function::name.equals("foo")}?
 */
isolatedDynamicScopeRef(scope) ::= "ctx->SCOPE_STACK(<scope>)"
| |
/** Reference an attribute of a labeled rule; when the rule has multiple
 *  return values the label is a struct and we select the member, when it
 *  has a single return value the label IS the value.
 */
ruleLabelRef(referencedRule,scope,attr) ::= <<
<if(referencedRule.hasMultipleReturnValues)>
<scope>.<attr.name>
<else>
<scope>
<endif>
>>
| |
/** Read a return value of the current rule: a member of the generated
 *  retval struct when there are multiple return values, otherwise a
 *  plain local of the same name.
 */
returnAttributeRef(ruleDescriptor,attr) ::= <<
<if(ruleDescriptor.hasMultipleReturnValues)>
retval.<attr.name>
<else>
<attr.name>
<endif>
>>

/** Assignment counterpart of returnAttributeRef. */
returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
<if(ruleDescriptor.hasMultipleReturnValues)>
retval.<attr.name>=<expr>;
<else>
<attr.name>=<expr>;
<endif>
>>
| |
/** How to translate $tokenLabel */
tokenLabelRef(label) ::= "<label>"

/** ids+=ID {$ids} or e+=expr {$e} — list labels get a "list_" prefix. */
listLabelRef(label) ::= "list_<label>"
| |
| |
// not sure the next are the right approach
//
// Property accessors for token labels ($label.text, $label.type, ...):
// each expands to the corresponding pANTLR3_COMMON_TOKEN API call on
// the label; $label.int parses the token text as a 32-bit integer.
//
tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText(<scope>))"
tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>->getType(<scope>))"
tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>->getLine(<scope>))"
tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>->getCharPositionInLine(<scope>))"
tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>->getChannel(<scope>))"
tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>->getTokenIndex(<scope>))"
tokenLabelPropertyRef_tree(scope,attr) ::= "(<scope>->tree)"
tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>->getText(<scope>)->toInt32(<scope>->getText(<scope>)))"
| |
// Property accessors for rule labels ($label.start, $label.stop, ...):
// read from the label's return struct; $label.text re-renders the
// matched span from the input stream.
//
ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>.start)"
ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>.stop)"
ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>.tree)"
ruleLabelPropertyRef_text(scope,attr) ::= <<
<if(TREE_PARSER)>
(STRSTREAM->toStringSS(STRSTREAM, <scope>.start, <scope>.start))
<else>
(STRSTREAM->toStringTT(STRSTREAM, <scope>.start, <scope>.stop))
<endif>
>>

ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
| |
/** Isolated $RULE ref ok in lexer as it's a Token */
lexerRuleLabel(label) ::= "<label>"

// Property accessors for lexer rule labels: in the lexer a rule label
// is a token, so these are the same token API calls as above.
//
lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>->getType(<scope>))"
lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>->getLine(<scope>))"
lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>->getCharPositionInLine(<scope>))"
lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>->getChannel(<scope>))"
lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>->getTokenIndex(<scope>))"
lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText(<scope>))"
| |
// Somebody may ref $template or $tree or $stop within a rule:
// these read the current rule's retval struct; $text re-renders the
// span matched so far (tree parser vs token stream forms differ).
//
rulePropertyRef_start(scope,attr) ::= "retval.start"
rulePropertyRef_stop(scope,attr) ::= "retval.stop"
rulePropertyRef_tree(scope,attr) ::= "retval.tree"
rulePropertyRef_text(scope,attr) ::= <<
<if(TREE_PARSER)>
INPUT->toStringSS(INPUT, ADAPTOR->getTokenStartIndex(ADAPTOR, retval.start), ADAPTOR->getTokenStopIndex(ADAPTOR, retval.start))
<else>
STRSTREAM->toStringTT(STRSTREAM, retval.start, LT(-1))
<endif>
>>
rulePropertyRef_st(scope,attr) ::= "retval.st"
| |
// Properties of the token currently being matched in a lexer rule:
// sourced from the lexer object and its LEXSTATE rather than a token
// instance, since the token may not have been created yet.
//
lexerRulePropertyRef_text(scope,attr) ::= "LEXER->getText(LEXER)"
lexerRulePropertyRef_type(scope,attr) ::= "_type"
lexerRulePropertyRef_line(scope,attr) ::= "LEXSTATE->tokenStartLine"
lexerRulePropertyRef_pos(scope,attr) ::= "LEXSTATE->tokenStartCharPositionInLine"
lexerRulePropertyRef_channel(scope,attr) ::= "LEXSTATE->channel"
lexerRulePropertyRef_start(scope,attr) ::= "LEXSTATE->tokenStartCharIndex"
lexerRulePropertyRef_stop(scope,attr) ::= "(LEXER->getCharIndex(LEXER)-1)"
lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
lexerRulePropertyRef_int(scope,attr) ::= "LEXER->getText(LEXER)->toInt32(LEXER->getText(LEXER))"
| |
| |
// setting $st and $tree is allowed in local rule. everything else is flagged as error
ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree=<expr>;"
ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st=<expr>;"
| |
| |
/** How to deal with an \@after for C targets. Because we cannot rely on
 *  any garbage collection, after code is executed even in backtracking
 *  mode. Must be documented clearly. The action is wrapped in its own
 *  brace block with no backtracking gate (contrast execAction below).
 */
execAfter(action) ::= <<
{
    <action>
}
>>
| |
/** How to execute an action (when not backtracking). In a backtracking
 *  grammar the action is guarded by the grammar's synpredgate action if
 *  one is defined, otherwise by BACKTRACKING == 0; without backtracking
 *  it runs unconditionally in its own brace block.
 */
execAction(action) ::= <<
<if(backtracking)>
<if(actions.(actionScope).synpredgate)>
if ( <actions.(actionScope).synpredgate> )
{
    <action>
}
<else>
if ( BACKTRACKING == 0 )
{
    <action>
}
<endif>
<else>
{
    <action>
}
<endif>
>>
| |
| // M I S C (properties, etc...) |
| |
/** Declare a follow-set bitset as static data: the raw 64-bit words
 *  plus an ANTLR3_BITSET_LIST wrapper recording the word count.
 */
bitsetDeclare(name, words64) ::= <<

/** Bitset defining follow set for error recovery in rule state: <name> */
static ANTLR3_BITWORD <name>_bits[] = { <words64:{it |ANTLR3_UINT64_LIT(<it>)}; separator=", "> };
static ANTLR3_BITSET_LIST <name> = { <name>_bits, <length(words64)> };
>>

/** Reference a declared bitset via the runtime set API. */
bitset(name, words64) ::= <<
antlr3BitsetSetAPI(&<name>);<\n>
>>
| |
/** File extension for generated code: the C target emits .c files. */
codeFileExtension() ::= ".c"

/** Literals the C runtime uses for boolean true/false. */
true_value() ::= "ANTLR3_TRUE"
false_value() ::= "ANTLR3_FALSE"